| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 to 54.5k chars) | int64 (0 to 371) | string (87 to 49.2k chars) | int64 (0 to 349) | int64 (0 to 1) |
from __future__ import annotations

import math


class SegmentTree:
    """Segment tree with lazy propagation: range-assignment updates and range-max queries (1-based, inclusive)."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [False for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b]."""
        # push any pending lazy value down before descending
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))  # 7
    print(segt.query(1, 1, size, 7, 11))  # 14
    print(segt.query(1, 1, size, 7, 12))  # 15
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))  # 111
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| code_codestyle: 62 |
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| style_context_codestyle: 326 | label: 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 346 |
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    """Wraps a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
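A minimal usage sketch for the processor above. The checkpoint name, image path, and printed keys are illustrative assumptions rather than part of this file, and the default apply_ocr=True path requires pytesseract to be installed.

from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")  # assumed checkpoint
image = Image.open("document.png").convert("RGB")  # hypothetical local document image
# with apply_ocr left at its default, words and boxes come from OCR
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # should include input_ids, bbox, attention_mask, image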
| style_context_codestyle: 346 | label: 1 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| code_codestyle: 170 |
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class OnnxRuntimeRequiredTests:  # the original class name was lost to obfuscation; this name is a placeholder
    pass
| style_context_codestyle: 51 | label: 0 |
"""simple docstring"""
import argparse
import os
import re
lowerCamelCase__ = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCamelCase__ = re.compile(R"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
lowerCamelCase__ = re.compile(R"\s*\(\s*\"(\S[^\"]+)\"")
def lowercase__ ( lowercase_ ,lowercase_ = False ) -> Any:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ) as f:
_UpperCamelCase : List[str] = f.read()
_UpperCamelCase : Dict = content.split("\n" )
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Optional[Any] = 0
while line_idx < len(lowercase_ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_UpperCamelCase : List[Any] = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
_UpperCamelCase : Dict = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_UpperCamelCase : Tuple = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_UpperCamelCase : List[str] = sorted(lowercase_ ,key=lambda lowercase_ : _re_identifier.search(lowercase_ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase_ ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(lowercase_ ) )
elif "\n".join(lowercase_ ) != content:
return True
def lowercase__ ( lowercase_ = False ) -> Any:
"""simple docstring"""
_UpperCamelCase : int = [os.path.join(lowercase_ ,lowercase_ ) for f in os.listdir(lowercase_ ) if f.endswith(".py" )]
_UpperCamelCase : Optional[int] = [sort_auto_mapping(lowercase_ ,overwrite=lowercase_ ) for fname in fnames]
if not overwrite and any(lowercase_ ):
_UpperCamelCase : Dict = [f for f, d in zip(lowercase_ ,lowercase_ ) if d]
raise ValueError(
F'''The following files have auto mappings that need sorting: {', '.join(lowercase_ )}. Run `make style` to fix'''
" this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
lowerCamelCase__ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
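A usage sketch of the two entry points defined above (calling the functions directly rather than via the CLI):

# check-only mode: raise if any auto mapping is unsorted, leave files untouched
sort_all_auto_mappings(overwrite=False)
# fix mode: rewrite the auto-mapping files in place
sort_all_auto_mappings(overwrite=True)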
| code_codestyle: 359 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stand-in so the calls below do not fail when vision is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| style_context_codestyle: 310 | label: 0 |
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
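A minimal usage sketch for the pipeline above. The checkpoint name and example sentence are illustrative assumptions, not part of this file.

from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")  # assumed masked-LM checkpoint
predictions = fill_mask("Paris is the [MASK] of France.", top_k=3)
for prediction in predictions:
    print(prediction["token_str"], round(prediction["score"], 4))

# `targets` restricts scoring to candidate tokens, as documented in the class docstring above
predictions = fill_mask("Paris is the [MASK] of France.", targets=["capital", "city"])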
| code_codestyle: 189 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder


# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. '''
    b'''\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| style_context_codestyle: 189 | label: 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__A = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__A = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class lowerCamelCase__ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
lowerCamelCase = BartTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="replace" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=False , __UpperCAmelCase=True , **__UpperCAmelCase , ) -> str:
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase , **__UpperCAmelCase , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __UpperCAmelCase ) != add_prefix_space:
_lowerCAmelCase =getattr(__UpperCAmelCase , pre_tok_state.pop("""type""" ) )
_lowerCAmelCase =add_prefix_space
_lowerCAmelCase =pre_tok_class(**__UpperCAmelCase )
_lowerCAmelCase =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCAmelCase ="""post_processor"""
_lowerCAmelCase =getattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
if tokenizer_component_instance:
_lowerCAmelCase =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase =tuple(state["""sep"""] )
if "cls" in state:
_lowerCAmelCase =tuple(state["""cls"""] )
_lowerCAmelCase =False
if state.get("""add_prefix_space""" , __UpperCAmelCase ) != add_prefix_space:
_lowerCAmelCase =add_prefix_space
_lowerCAmelCase =True
if state.get("""trim_offsets""" , __UpperCAmelCase ) != trim_offsets:
_lowerCAmelCase =trim_offsets
_lowerCAmelCase =True
if changes_to_apply:
_lowerCAmelCase =getattr(__UpperCAmelCase , state.pop("""type""" ) )
_lowerCAmelCase =component_class(**__UpperCAmelCase )
setattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else value
_lowerCAmelCase =value
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> BatchEncoding:
_lowerCAmelCase =kwargs.get("""is_split_into_words""" , __UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> BatchEncoding:
_lowerCAmelCase =kwargs.get("""is_split_into_words""" , __UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
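

# --- Hedged usage sketch (added; not part of the original file). The checkpoint id and
# the `BartTokenizerFast` import assume this class is the standard BART fast tokenizer;
# the example shows why `add_prefix_space=True` is required for pretokenized input.
if __name__ == "__main__":
    from transformers import BartTokenizerFast

    tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base", add_prefix_space=True)
    encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
    print(encoding.tokens())  # e.g. ['<s>', 'ĠHello', 'Ġworld', '</s>']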
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
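

# --- Illustration (added sketch, with assumptions): a minimal stand-in for what
# `_LazyModule` above does. Heavy submodules are only imported when one of their
# attributes is first accessed; the real class additionally subclasses
# `types.ModuleType` so it can live in `sys.modules`.
import importlib


class MiniLazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        # map attribute -> submodule, e.g. "PerceiverConfig" -> "configuration_perceiver"
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self._name)
        return getattr(module, attr)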
import re
def dna_complement(dna):
    """Returns the complement of the given DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
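
# --- Example values (added), worked out by hand from the A<->T / C<->G pairing above.
if __name__ == "__main__":
    assert dna_complement("ATCG") == "TAGC"
    assert dna_complement("GGTCA") == "CCAGT"
    print("complement examples pass")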
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
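
# --- Worked example (added for illustration; the key is hypothetical but follows the
# original checkpoint's naming scheme), tracing `rename_key` through its chain:
#   "model.transformer_0.norm1.weight"
#   -> "transformer_0.norm1.weight"                              ("model." stripped)
#   -> "transformer_0.attention.output.LayerNorm.weight"         ("norm1" rewritten)
#   -> "encoder.layer.0.attention.output.LayerNorm.weight"       (layer index moved)
#   -> "yoso.encoder.layer.0.attention.output.LayerNorm.weight"  ("yoso." prefix added)
assert rename_key("model.transformer_0.norm1.weight") == "yoso.encoder.layer.0.attention.output.LayerNorm.weight"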
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler")
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
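

# --- Minimal sketch (added; assumes network access to the same
# `ssube/stable-diffusion-x4-upscaler-onnx` checkpoint the tests above use) of the
# scheduler-swap pattern those tests exercise: any compatible scheduler can be rebuilt
# from the current scheduler's config.
if is_onnx_available():

    def _upscale_with_euler(prompt, image):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        return pipe(prompt=prompt, image=image, num_inference_steps=10, output_type="np").images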
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation.")
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
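

# --- Standalone demo (added; pure stdlib, no jieba or SentencePiece needed) of the
# `str.maketrans` trick used by this tokenizer: spaces and newlines are mapped to the
# block characters \u2582/\u2583 before tokenization and mapped back in `_decode`.
if __name__ == "__main__":
    translator = str.maketrans(" \n", "\u2582\u2583")
    encoded = "hello world\n".translate(translator)
    assert encoded == "hello\u2582world\u2583"
    decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
    assert decoded == "hello world\n"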
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2)
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
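
# --- Hedged usage sketch (added; not part of the conversion script). It assumes the
# converted checkpoints live under the hub names pushed above; the image path is a
# placeholder. `post_process_object_detection` is the standard DETR-style helper.
def _detect_tables(image_path):
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
    model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")
    image = Image.open(image_path).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    outputs = model(**inputs)
    target_sizes = torch.tensor([image.size[::-1]])
    return processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)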
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
SCREAMING_SNAKE_CASE : Dict = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
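

# --- Usage sketch (added, illustrative): `dep_version_check` resolves the pinned range
# from the dependency table, while `require_version` takes an explicit requirement plus
# an optional hint that is appended to the error message on failure.
if __name__ == "__main__":
    dep_version_check("tqdm")  # checks the range pinned in dependency_versions_table.py
    require_version("numpy>=1.17", "pip install -U numpy")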
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def get_yolos_config(yolos_name) -> YolosConfig:
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict, model) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
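
# --- Self-contained check (added for illustration) of the q/k/v slicing convention in
# `convert_state_dict`: timm stores the fused projection as a [3*dim, dim] matrix, and
# the query/key/value blocks are recovered as consecutive thirds, in that order.
def _demo_qkv_split(dim=4):
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), fused)
    return q, k, v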
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others.")
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})
    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())
    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.")
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.")
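

# --- Toy self-check (added for illustration): `clean_doc_toc` collapses the duplicate
# "ddim" entry and pins the "Overview" page to the front.
_example_toc = [
    {"local": "ddim", "title": "DDIM"},
    {"local": "overview", "title": "Overview"},
    {"local": "ddim", "title": "DDIM"},
]
assert clean_doc_toc(_example_toc) == [
    {"local": "overview", "title": "Overview"},
    {"local": "ddim", "title": "DDIM"},
]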
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCAmelCase_ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
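A hedged usage sketch of the function above: a string containing whitespace is treated as a literal prompt and returned unchanged, while a space-free string is treated as a Hub dataset repo id (the second call is commented out because it requires network access):

template = download_prompt("Human: say hi\n\nAssistant:", agent_name="my-agent")
assert template == "Human: say hi\n\nAssistant:"  # whitespace -> literal prompt
# template = download_prompt("huggingface-tools/default-prompts", agent_name="my-agent", mode="chat")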
| 247
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    """Create a SageMaker execution role with the trust policy below."""
    iam_client = botoa.client("iam")
    sagemaker_trust_policy = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2)
        )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
    iam_client = botoa.client("iam")
return iam_client.get_role(RoleName=_UpperCamelCase )["Role"]["Arn"]
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =_ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , _UpperCamelCase , )
_SCREAMING_SNAKE_CASE =None
if credentials_configuration == 0:
_SCREAMING_SNAKE_CASE =_ask_field('Enter your AWS Profile name: [default] ' , default='default' )
_SCREAMING_SNAKE_CASE =aws_profile
else:
print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with "
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
_SCREAMING_SNAKE_CASE =_ask_field('AWS Access Key ID: ' )
_SCREAMING_SNAKE_CASE =aws_access_key_id
_SCREAMING_SNAKE_CASE =_ask_field('AWS Secret Access Key: ' )
_SCREAMING_SNAKE_CASE =aws_secret_access_key
_SCREAMING_SNAKE_CASE =_ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
_SCREAMING_SNAKE_CASE =aws_region
_SCREAMING_SNAKE_CASE =_ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , _UpperCamelCase , )
if role_management == 0:
_SCREAMING_SNAKE_CASE =_ask_field('Enter your IAM role name: ' )
else:
_SCREAMING_SNAKE_CASE ='accelerate_sagemaker_execution_role'
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =None
if is_custom_docker_image:
_SCREAMING_SNAKE_CASE =_ask_field('Enter your Docker image: ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =None
if is_sagemaker_inputs_enabled:
_SCREAMING_SNAKE_CASE =_ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =None
if is_sagemaker_metrics_enabled:
_SCREAMING_SNAKE_CASE =_ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , )
_SCREAMING_SNAKE_CASE =_ask_options(
'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , )
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =_ask_field(
        "Do you wish to optimize your script with torch dynamo? [yes/NO]: " , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message="Please enter yes or no." , )
if use_dynamo:
_SCREAMING_SNAKE_CASE ='dynamo_'
_SCREAMING_SNAKE_CASE =_ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
if use_custom_options:
_SCREAMING_SNAKE_CASE =_ask_options(
'Which mode do you want to use?' , _UpperCamelCase , lambda _UpperCamelCase : TORCH_DYNAMO_MODES[int(_UpperCamelCase )] , default='default' , )
_SCREAMING_SNAKE_CASE =_ask_field(
                "Do you want the fullgraph mode or is it ok to break the model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message="Please enter yes or no." , )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
    eca_instance_query = "Which EC2 instance type do you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
_SCREAMING_SNAKE_CASE =_ask_options(
_UpperCamelCase , _UpperCamelCase , lambda _UpperCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_UpperCamelCase )] )
else:
        eca_instance_query += " [ml.p3.2xlarge]:"
        _SCREAMING_SNAKE_CASE = _ask_field(eca_instance_query , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , default="ml.p3.2xlarge" )
_SCREAMING_SNAKE_CASE =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_SCREAMING_SNAKE_CASE =_ask_field(
            "How many machines do you want to use? [1]: " , _UpperCamelCase , default=1 , )
_SCREAMING_SNAKE_CASE =_ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=_UpperCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_UpperCamelCase , use_cpu=_UpperCamelCase , dynamo_config=_UpperCamelCase , eca_instance_type=_UpperCamelCase , profile=_UpperCamelCase , region=_UpperCamelCase , iam_role_name=_UpperCamelCase , mixed_precision=_UpperCamelCase , num_machines=_UpperCamelCase , sagemaker_inputs_file=_UpperCamelCase , sagemaker_metrics_file=_UpperCamelCase , )
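For reference, a standalone sketch of the IAM call the helper above wraps, written against the real boto3 client API (create_role does take these keyword arguments; the role name is made up, and valid AWS credentials are assumed):

import json

import boto3

iam = boto3.client("iam")
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
    ],
}
iam.create_role(RoleName="my-sagemaker-role", AssumeRolePolicyDocument=json.dumps(trust_policy, indent=2))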
| 47
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
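The _LazyModule indirection above keeps importing the package cheap by deferring the torch-heavy submodule imports until an attribute is first touched. A self-contained sketch of the same idea using PEP 562 module-level __getattr__ (toy mapping, not the transformers implementation):

import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute name -> module that provides it

def __getattr__(name):
    # Only runs when `name` was not found through the normal module lookup.
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")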
| 341
| 0
|
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
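One detail worth spelling out: from_config builds a freshly, randomly initialized model whose architecture comes from the config, whereas from_pretrained would also load weights. A hedged sketch (requires transformers; "gpt2" is used here purely as an example config):

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("gpt2", scale_attn_by_inverse_layer_idx=True, reorder_and_upcast_attn=True)
model = AutoModelForCausalLM.from_config(config)  # random weights, nothing pretrained
print(f"{sum(p.numel() for p in model.parameters()):,} randomly initialized parameters")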
| 370
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315
| 0
|
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """
    Count the distinct ways to climb a staircase of `number_of_steps` steps,
    taking 1 or 2 steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(4)
    5
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
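The loop keeps the pair (ways(k), ways(k - 1)) and advances it once per step; a short trace of that state for number_of_steps = 4, matching the repaired climb_stairs above:

current, previous = 1, 1              # (ways(1), ways(0))
for _ in range(3):                    # three updates take us to n = 4
    current, previous = current + previous, current
print(current)                        # 5, via (1, 1) -> (2, 1) -> (3, 2) -> (5, 3)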
| 318
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
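A hedged usage sketch (requires transformers, and assumes its CTRLConfig defaults still match the values shown above): the attribute_map lets generic names alias the CTRL-specific ones.

from transformers import CTRLConfig

config = CTRLConfig()
assert config.hidden_size == config.n_embd == 1280      # aliased through attribute_map
assert config.num_hidden_layers == config.n_layer == 48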
| 318
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = botoa.client("iam")
    sagemaker_trust_policy = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2)
        )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"""role {role_name} already exists. Using existing one""" )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[int]:
    iam_client = botoa.client("iam")
return iam_client.get_role(RoleName=_UpperCAmelCase )["Role"]["Arn"]
def SCREAMING_SNAKE_CASE ( ) -> str:
lowerCamelCase__ : Optional[Any] = _ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , _UpperCAmelCase , )
lowerCamelCase__ : Optional[Any] = None
if credentials_configuration == 0:
lowerCamelCase__ : Dict = _ask_field('Enter your AWS Profile name: [default] ' , default='default' )
lowerCamelCase__ : Union[str, Any] = aws_profile
else:
print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with "
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
lowerCamelCase__ : str = _ask_field('AWS Access Key ID: ' )
lowerCamelCase__ : Optional[int] = aws_access_key_id
lowerCamelCase__ : List[str] = _ask_field('AWS Secret Access Key: ' )
lowerCamelCase__ : str = aws_secret_access_key
lowerCamelCase__ : Tuple = _ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
lowerCamelCase__ : Any = aws_region
lowerCamelCase__ : Optional[Any] = _ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , _UpperCAmelCase , )
if role_management == 0:
lowerCamelCase__ : int = _ask_field('Enter your IAM role name: ' )
else:
lowerCamelCase__ : List[Any] = 'accelerate_sagemaker_execution_role'
print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
_create_iam_role_for_sagemaker(_UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = _ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message='Please enter yes or no.' , )
lowerCamelCase__ : Dict = None
if is_custom_docker_image:
lowerCamelCase__ : Tuple = _ask_field('Enter your Docker image: ' , lambda _UpperCAmelCase : str(_UpperCAmelCase ).lower() )
lowerCamelCase__ : Optional[Any] = _ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message='Please enter yes or no.' , )
lowerCamelCase__ : int = None
if is_sagemaker_inputs_enabled:
lowerCamelCase__ : Union[str, Any] = _ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda _UpperCAmelCase : str(_UpperCAmelCase ).lower() , )
lowerCamelCase__ : str = _ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message='Please enter yes or no.' , )
lowerCamelCase__ : Any = None
if is_sagemaker_metrics_enabled:
lowerCamelCase__ : List[Any] = _ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda _UpperCAmelCase : str(_UpperCAmelCase ).lower() , )
lowerCamelCase__ : Tuple = _ask_options(
'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , )
lowerCamelCase__ : Tuple = {}
lowerCamelCase__ : Optional[Any] = _ask_field(
        "Do you wish to optimize your script with torch dynamo? [yes/NO]: " , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message="Please enter yes or no." , )
if use_dynamo:
lowerCamelCase__ : Optional[int] = 'dynamo_'
lowerCamelCase__ : List[str] = _ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
lowerCamelCase__ : str = _ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message='Please enter yes or no.' , )
if use_custom_options:
lowerCamelCase__ : Union[str, Any] = _ask_options(
'Which mode do you want to use?' , _UpperCAmelCase , lambda _UpperCAmelCase : TORCH_DYNAMO_MODES[int(_UpperCAmelCase )] , default='default' , )
lowerCamelCase__ : Optional[int] = _ask_field(
                "Do you want the fullgraph mode or is it ok to break the model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message="Please enter yes or no." , )
lowerCamelCase__ : List[str] = _ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message='Please enter yes or no.' , )
    eca_instance_query = "Which EC2 instance type do you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
lowerCamelCase__ : Optional[Any] = _ask_options(
_UpperCAmelCase , _UpperCAmelCase , lambda _UpperCAmelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_UpperCAmelCase )] )
else:
        eca_instance_query += " [ml.p3.2xlarge]:"
        lowerCamelCase__ : List[Any] = _ask_field(eca_instance_query , lambda _UpperCAmelCase : str(_UpperCAmelCase ).lower() , default="ml.p3.2xlarge" )
lowerCamelCase__ : Optional[Any] = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
lowerCamelCase__ : str = _ask_field(
            "How many machines do you want to use? [1]: " , _UpperCAmelCase , default=1 , )
lowerCamelCase__ : str = _ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=_UpperCAmelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_UpperCAmelCase , use_cpu=_UpperCAmelCase , dynamo_config=_UpperCAmelCase , eca_instance_type=_UpperCAmelCase , profile=_UpperCAmelCase , region=_UpperCAmelCase , iam_role_name=_UpperCAmelCase , mixed_precision=_UpperCAmelCase , num_machines=_UpperCAmelCase , sagemaker_inputs_file=_UpperCAmelCase , sagemaker_metrics_file=_UpperCAmelCase , )
| 360
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45
| 0
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : int ) -> str:
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def A ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , _A )
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _A )
def A ( self : int ) -> Union[str, Any]:
pass
def A ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> List[str]:
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ : Optional[int] = image_processing(_A , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ : Optional[int] = image_processing(_A , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Any:
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ : Dict = self.image_processing_class(do_resize=_A , do_normalize=_A , do_rescale=_A )
# create random PyTorch tensors
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
UpperCAmelCase_ : List[str] = image_processing_a.pad(_A , return_tensors='''pt''' )
UpperCAmelCase_ : Any = image_processing_a(_A , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def A ( self : Any ) -> List[Any]:
UpperCAmelCase_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase_ : List[str] = json.loads(f.read() )
UpperCAmelCase_ : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
UpperCAmelCase_ : List[Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
UpperCAmelCase_ : int = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase_ : Dict = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase_ : int = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase_ : str = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase_ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase_ : str = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase_ : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase_ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
UpperCAmelCase_ : Optional[Any] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase_ : Optional[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def A ( self : Dict ) -> List[Any]:
UpperCAmelCase_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase_ : Optional[Any] = json.loads(f.read() )
UpperCAmelCase_ : Dict = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
UpperCAmelCase_ : int = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase_ : Union[str, Any] = YolosImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase_ : Union[str, Any] = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase_ : Optional[int] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase_ : List[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase_ : Any = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase_ : str = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase_ : Tuple = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase_ : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase_ : int = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
UpperCAmelCase_ : List[Any] = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
UpperCAmelCase_ : Dict = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase_ : Tuple = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
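The aspect-ratio rule that get_expected_values mirrors can be checked in isolation; a pure-Python sketch with a hypothetical helper name (the longest_edge cap is deliberately ignored, as in the tester above):

def expected_size(width, height, shortest_edge=18):
    # Scale so the shorter side lands exactly on `shortest_edge`.
    if width < height:
        return shortest_edge, int(shortest_edge * height / width)
    if width > height:
        return int(shortest_edge * width / height), shortest_edge
    return shortest_edge, shortest_edge

print(expected_size(400, 300))  # (24, 18): height is the shorter side and becomes 18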
| 304
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = 1
lowerCamelCase = 3
lowerCamelCase = (32, 32)
lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(A )
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
        def extract(*args, **kwargs):
class __lowercase :
"""simple docstring"""
def __init__( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = torch.ones([0] )
def __A ( self , A ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(A )
return self
return Out()
return extract
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = sd_pipe([prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowerCamelCase = output.images
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=A , )[0]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = sd_pipe([prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowerCamelCase = output.images
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=A , )[0]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=A )
assert isinstance(A , A )
assert isinstance(pipe.scheduler , A )
assert pipe.safety_checker is None
lowerCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
lowerCamelCase = StableDiffusionPipeline.from_pretrained(A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
lowerCamelCase = unet.half()
lowerCamelCase = vae.half()
lowerCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=A )
lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
lowerCamelCase = 40_03_66_03_46
lowerCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=A )
lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
lowerCamelCase = 27_34_97_17_55
lowerCamelCase = 7
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
lowerCamelCase = 10_44_35_52_34
lowerCamelCase = 12
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
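The seeded torch.Generator pattern these tests rely on is what makes the pixel-slice assertions reproducible; a minimal demonstration with plain torch:

import torch

g1 = torch.Generator(device="cpu").manual_seed(0)
g2 = torch.Generator(device="cpu").manual_seed(0)
a = torch.randn(2, 3, generator=g1)
b = torch.randn(2, 3, generator=g2)
assert torch.equal(a, b)  # same seed, same draws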
| 252
| 0
|
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """
    Create Pascal's triangle as a list of rows.

    >>> generate_pascal_triangle(3)
    [[1], [1, 1], [1, 2, 1]]
    >>> generate_pascal_triangle(0)
    []
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """
    Same output as generate_pascal_triangle, but each row is built from only
    its first half plus a mirrored copy.

    >>> generate_pascal_triangle_optimized(3)
    [[1], [1, 1], [1, 2, 1]]
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
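With the names restored above, a quick sanity check of the symmetry the optimized variant exploits (it builds only the first half of each row and mirrors it):

rows = generate_pascal_triangle_optimized(6)
assert rows == generate_pascal_triangle(6)
assert all(row == row[::-1] for row in rows)  # every row is a palindrome
print(rows[5])  # [1, 5, 10, 10, 5, 1]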
| 354
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
@property
def _UpperCamelCase ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))

        # verify area
        expected_area = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
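
# Hedged usage sketch (added, not part of the test file): running the processor
# on a single image, mirroring what the slow tests above exercise. The image
# path is hypothetical; the checkpoint is the one used in the tests.
#   from PIL import Image
#   from transformers import ConditionalDetrImageProcessor
#   processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
#   inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # (1, 3, H, W) after shortest-edge resizing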
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
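
# Quick illustration (added): comment-only lines are stripped before hashing,
# so these two hypothetical snippets produce the same digest.
#   assert _hash_python_lines(["# a comment-only line", "x = 1"]) == _hash_python_lines(["x = 1"])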
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
SCREAMING_SNAKE_CASE = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
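# Example invocation (illustrative; the script name and output dir are hypothetical):
#   python save_randomly_initialized_model.py t5-small ./t5-small-random --d_model=64
# fire maps the positional args to config_name/save_dir and extra --flags to **config_kwargs.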
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
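
# Hedged usage sketch (added): building a small config and inspecting the ONNX
# input axes; the constructor arguments shown are illustrative.
#   config = Data2VecTextConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
#   onnx_config = Data2VecTextOnnxConfig(config, task="default")
#   print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes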
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version() -> None:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"Pix2StructImageProcessor. Please upgrade torch." )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
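
# Standalone sketch (added for illustration): the resize rule in
# extract_flattened_patches picks the largest grid of patches that fits within
# max_patches while roughly preserving aspect ratio. Relies on the module's
# `math` import; the function name is hypothetical.
def feasible_patch_grid(image_h: int, image_w: int, patch_h: int, patch_w: int, max_patches: int):
    scale = math.sqrt(max_patches * (patch_h / image_h) * (patch_w / image_w))
    rows = max(min(math.floor(scale * image_h / patch_h), max_patches), 1)
    cols = max(min(math.floor(scale * image_w / patch_w), max_patches), 1)
    return rows, cols  # rows * cols <= max_patches by construction

# e.g. feasible_patch_grid(480, 640, 16, 16, 2048) -> (39, 52); 39 * 52 = 2028 <= 2048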
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
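
# Tiny illustration (added): what the default attention mask looks like for a
# padded batch; values are illustrative.
#   import tensorflow as tf
#   ids = tf.constant([[5, 6, 1, 1]])                    # pad_token_id == 1
#   mask = tf.cast(tf.math.not_equal(ids, 1), tf.int8)   # -> [[1, 1, 0, 0]]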
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
"""simple docstring"""
def solution(n: int = 10_00) -> int:
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
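# Added note (an inference, not stated in the file): 2 * a * ((a - 1) // 2) matches
# the known maximum of ((a - 1)**n + (a + 1)**n) % a**2 over n (Project Euler 120),
# which a brute force confirms for small a:
#   def r_max_brute(a: int) -> int:
#       return max(((a - 1) ** n + (a + 1) ** n) % a**2 for n in range(1, 2 * a + 1))
#   assert all(r_max_brute(a) == 2 * a * ((a - 1) // 2) for a in range(3, 50))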
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_llama'''] = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_llama_fast'''] = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_llama'''] = [
        '''LlamaForCausalLM''',
        '''LlamaModel''',
        '''LlamaPreTrainedModel''',
        '''LlamaForSequenceClassification''',
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
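
# How the lazy module behaves (added, hedged illustration): importing the
# package is cheap; the heavy torch-backed submodule is only loaded on first
# attribute access.
#   import transformers.models.llama as llama   # fast, nothing heavy imported yet
#   llama.LlamaConfig                            # triggers the real import lazily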
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
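
# Quick sanity sketch for slerp (added; endpoints follow directly from the formula):
#   import torch
#   x0, x1 = torch.randn(8), torch.randn(8)
#   torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 0.0), x0, atol=1e-5)  # alpha=0 -> x0
#   torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 1.0), x1, atol=1e-5)  # alpha=1 -> x1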
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xglm"""] = [
        """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XGLMForCausalLM""",
        """XGLMModel""",
        """XGLMPreTrainedModel""",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_xglm"""] = [
        """FlaxXGLMForCausalLM""",
        """FlaxXGLMModel""",
        """FlaxXGLMPreTrainedModel""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xglm"""] = [
        """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXGLMForCausalLM""",
        """TFXGLMModel""",
        """TFXGLMPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
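# Example (added): negative inputs are squashed toward -alpha, positives pass through.
#   exponential_linear_unit(np.array([-2.0, 0.0, 3.0]), alpha=1.0)
#   -> array([-0.86466472,  0.        ,  3.        ])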
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase_ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowercase_ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
lowercase_ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 1_000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''fixed_small_log''')
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0_000E-10)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.054_9625)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.999_4987)) < 1E-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''learned_range''')
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.171_2790 < 1E-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.799_8052 < 1E-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.001_0011 < 1E-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.268_2495) < 1E-2
        assert abs(result_mean.item() - 0.328_4743) < 1E-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.204_4983) < 1E-2
        assert abs(result_mean.item() - 0.336_2038) < 1E-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F"""{group_key}.""", F"""{group_key}.group.""")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1E-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
UpperCamelCase__ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 92
|
def __lowercase ( numa , numb ) -> bool:
"""Return True when the two integers have opposite signs (sign bit of their XOR).
>>> __lowercase(1, -1)
True
>>> __lowercase(-80, -80)
False
"""
return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
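# Usage sketch (illustrative): with _LazyModule, importing the package itself is
# cheap, and the heavy modeling files are only imported on first attribute access,
# e.g. `from transformers.models.tapas import TapasConfig` pulls in
# configuration_tapas at that moment rather than at package-import time.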
| 11
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
| 11
| 1
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def A_ ( snake_case : Tuple , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Tuple ) -> str:
'''simple docstring'''
__UpperCamelCase = TapasConfig.from_json_file(snake_case )
# set absolute/relative position embeddings parameter
__UpperCamelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__UpperCamelCase = TapasForQuestionAnswering(config=snake_case )
elif task == "WTQ":
# run_task_main.py hparams
__UpperCamelCase = 4
__UpperCamelCase = True
# hparam_utils.py hparams
__UpperCamelCase = 0.664694
__UpperCamelCase = 0.207951
__UpperCamelCase = 0.121194
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = 0.0352513
__UpperCamelCase = TapasForQuestionAnswering(config=snake_case )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__UpperCamelCase = 4
__UpperCamelCase = False
# hparam_utils.py hparams
__UpperCamelCase = 36.4519
__UpperCamelCase = 0.903421
__UpperCamelCase = 222.088
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = 0.763141
__UpperCamelCase = TapasForQuestionAnswering(config=snake_case )
elif task == "TABFACT":
__UpperCamelCase = TapasForSequenceClassification(config=snake_case )
elif task == "MLM":
__UpperCamelCase = TapasForMaskedLM(config=snake_case )
elif task == "INTERMEDIATE_PRETRAINING":
__UpperCamelCase = TapasModel(config=snake_case )
else:
raise ValueError(f"Task {task} not supported." )
print(f"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(snake_case , snake_case , snake_case )
# Save pytorch-model (weights and configuration)
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(snake_case )
# Save tokenizer files
print(f"Save tokenizer files to {pytorch_dump_path}" )
__UpperCamelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' , model_max_length=512 )
tokenizer.save_pretrained(snake_case )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
lowercase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase__ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
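# Example invocation (illustrative script name and paths):
# python convert_tapas_checkpoint.py --task WTQ --reset_position_index_per_cell \
#     --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#     --tapas_config_file ./tapas_wtq/config.json \
#     --pytorch_dump_path ./tapas_wtq_pytorch
# Note that the tokenizer step above assumes the checkpoint path ends in
# "model.ckpt": its last 10 characters are replaced with "vocab.txt".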
| 328
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
lowercase__ : Any = parser.parse_args()
lowercase__ : Union[str, Any] = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
lowercase__ : List[str] = CLIPImageProcessor()
lowercase__ : Optional[Any] = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
lowercase__ : Optional[Any] = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 328
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class _snake_case ( a__ ):
snake_case__ = "efficientnet"
def __init__( self : Dict , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 600 , UpperCAmelCase : float = 2.0 , UpperCAmelCase : float = 3.1 , UpperCAmelCase : int = 8 , UpperCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase : List[int] = [] , UpperCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase : float = 0.2_5 , UpperCAmelCase : str = "swish" , UpperCAmelCase : int = 2560 , UpperCAmelCase : str = "mean" , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 0.0_0_1 , UpperCAmelCase : float = 0.9_9 , UpperCAmelCase : float = 0.5 , UpperCAmelCase : float = 0.2 , **UpperCAmelCase : Union[str, Any] , ):
super().__init__(**UpperCAmelCase )
__lowerCamelCase : Dict = num_channels
__lowerCamelCase : str = image_size
__lowerCamelCase : Any = width_coefficient
__lowerCamelCase : Any = depth_coefficient
__lowerCamelCase : Any = depth_divisor
__lowerCamelCase : Optional[Any] = kernel_sizes
__lowerCamelCase : Union[str, Any] = in_channels
__lowerCamelCase : List[Any] = out_channels
__lowerCamelCase : Optional[Any] = depthwise_padding
__lowerCamelCase : int = strides
__lowerCamelCase : int = num_block_repeats
__lowerCamelCase : Optional[Any] = expand_ratios
__lowerCamelCase : int = squeeze_expansion_ratio
__lowerCamelCase : Any = hidden_act
__lowerCamelCase : Optional[Any] = hidden_dim
__lowerCamelCase : Union[str, Any] = pooling_type
__lowerCamelCase : Optional[Any] = initializer_range
__lowerCamelCase : Tuple = batch_norm_eps
__lowerCamelCase : Optional[int] = batch_norm_momentum
__lowerCamelCase : Any = dropout_rate
__lowerCamelCase : List[Any] = drop_connect_rate
__lowerCamelCase : int = sum(UpperCAmelCase ) * 4
class _snake_case ( a__ ):
snake_case__ = version.parse("1.11" )
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCamelCase__ ( self : List[Any] ):
return 1E-5
| 64
|
"""simple docstring"""
import requests
__A = '''YOUR API KEY'''
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: str = giphy_api_key ) -> list:
'''simple docstring'''
__lowerCamelCase : Dict = "+".join(query.split() )
__lowerCamelCase : Optional[int] = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
__lowerCamelCase : Optional[Any] = requests.get(_lowerCamelCase ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 64
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowercase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["torch", "scipy"]
def __init__( self : Any, *UpperCAmelCase__ : Optional[Any], **UpperCAmelCase__ : Dict ):
requires_backends(self, ["torch", "scipy"] )
@classmethod
def _lowercase ( cls : str, *UpperCAmelCase__ : str, **UpperCAmelCase__ : List[str] ):
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def _lowercase ( cls : Tuple, *UpperCAmelCase__ : Dict, **UpperCAmelCase__ : Optional[Any] ):
requires_backends(cls, ["torch", "scipy"] )
| 17
|
'''simple docstring'''
import argparse
import os
import re
_UpperCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
_UpperCAmelCase : Any = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_UpperCAmelCase : List[Any] = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_UpperCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_UpperCAmelCase : Tuple = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_UpperCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = _re_indent.search(lowerCamelCase)
return "" if search is None else search.groups()[0]
def __magic_name__( lowerCamelCase, lowerCamelCase="", lowerCamelCase=None, lowerCamelCase=None):
__lowerCAmelCase = 0
__lowerCAmelCase = code.split('''\n''')
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase):
index += 1
__lowerCAmelCase = ['''\n'''.join(lines[:index])]
else:
__lowerCAmelCase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowerCAmelCase = [lines[index]]
index += 1
while index < len(lowerCamelCase) and (end_prompt is None or not lines[index].startswith(lowerCamelCase)):
if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
if len(lowerCamelCase) > 0 and get_indent(current_block[-1]).startswith(indent_level + ''' '''):
current_block.append(lines[index])
blocks.append('''\n'''.join(lowerCamelCase))
if index < len(lowerCamelCase) - 1:
__lowerCAmelCase = [lines[index + 1]]
index += 1
else:
__lowerCAmelCase = []
else:
blocks.append('''\n'''.join(lowerCamelCase))
__lowerCAmelCase = [lines[index]]
else:
current_block.append(lines[index])
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase) > 0:
blocks.append('''\n'''.join(lowerCamelCase))
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase):
blocks.append('''\n'''.join(lines[index:]))
return blocks
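# Illustrative: top-level statements are grouped together with their indented
# bodies, e.g. split_code_in_indented_blocks("a = 1\nif x:\n    pass")
# -> ["a = 1", "if x:\n    pass"]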
def __magic_name__( lowerCamelCase):
def _inner(lowerCamelCase):
return key(lowerCamelCase).lower().replace('''_''', '''''')
return _inner
def __magic_name__( lowerCamelCase, lowerCamelCase=None):
# If no key is provided, we use a noop.
def noop(lowerCamelCase):
return x
if key is None:
__lowerCAmelCase = noop
# Constants are all uppercase, they go first.
__lowerCAmelCase = [obj for obj in objects if key(lowerCamelCase).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__lowerCAmelCase = [obj for obj in objects if key(lowerCamelCase)[0].isupper() and not key(lowerCamelCase).isupper()]
# Functions begin with a lowercase, they go last.
__lowerCAmelCase = [obj for obj in objects if not key(lowerCamelCase)[0].isupper()]
__lowerCAmelCase = ignore_underscore(lowerCamelCase)
return sorted(lowerCamelCase, key=lowerCamelCase) + sorted(lowerCamelCase, key=lowerCamelCase) + sorted(lowerCamelCase, key=lowerCamelCase)
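# Illustrative ordering: constants first, classes second, functions last, each
# bucket sorted case-insensitively with underscores ignored, e.g.
# sort_objects(["foo", "Bar", "BAZ", "_qux"]) -> ["BAZ", "Bar", "foo", "_qux"]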
def __magic_name__( lowerCamelCase):
# This inner function sorts imports between [ ].
def _replace(lowerCamelCase):
__lowerCAmelCase = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
__lowerCAmelCase = [part.strip().replace('''"''', '''''') for part in imports.split(''',''')]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
__lowerCAmelCase = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase)]) + "]"
__lowerCAmelCase = import_statement.split('''\n''')
if len(lowerCamelCase) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__lowerCAmelCase = 2 if lines[1].strip() == '''[''' else 1
__lowerCAmelCase = [(i, _re_strip_line.search(lowerCamelCase).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
__lowerCAmelCase = sort_objects(lowerCamelCase, key=lambda lowerCamelCase: x[1])
__lowerCAmelCase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
elif len(lowerCamelCase) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1]) is not None:
__lowerCAmelCase = _re_bracket_content.sub(_replace, lines[1])
else:
__lowerCAmelCase = [part.strip().replace('''"''', '''''') for part in lines[1].split(''',''')]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
__lowerCAmelCase = keys[:-1]
__lowerCAmelCase = get_indent(lines[1]) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase)])
return "\n".join(lowerCamelCase)
else:
# Finally we have to deal with imports fitting on one line
__lowerCAmelCase = _re_bracket_content.sub(_replace, lowerCamelCase)
return import_statement
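# Illustrative: bracketed import lists are re-ordered in place, e.g.
# sort_objects_in_import('_import_structure["models"] = ["zeta", "Alpha"]')
# -> '_import_structure["models"] = ["Alpha", "zeta"]'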
def __magic_name__( lowerCamelCase, lowerCamelCase=True):
with open(lowerCamelCase, encoding='''utf-8''') as f:
__lowerCAmelCase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__lowerCAmelCase = split_code_in_indented_blocks(
lowerCamelCase, start_prompt='''_import_structure = {''', end_prompt='''if TYPE_CHECKING:''')
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1, len(lowerCamelCase) - 1):
# Check if the block contains some `_import_structure`s thingy to sort.
__lowerCAmelCase = main_blocks[block_idx]
__lowerCAmelCase = block.split('''\n''')
# Get to the start of the imports.
__lowerCAmelCase = 0
while line_idx < len(lowerCamelCase) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__lowerCAmelCase = len(lowerCamelCase)
else:
line_idx += 1
if line_idx >= len(lowerCamelCase):
continue
# Ignore beginning and last line: they don't contain anything.
__lowerCAmelCase = '''\n'''.join(block_lines[line_idx:-1])
__lowerCAmelCase = get_indent(block_lines[1])
# Split the internal block into blocks of indent level 1.
__lowerCAmelCase = split_code_in_indented_blocks(lowerCamelCase, indent_level=lowerCamelCase)
# We have two categories of import key: list or _import_structure[key].append/extend
__lowerCAmelCase = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__lowerCAmelCase = [(pattern.search(lowerCamelCase).groups()[0] if pattern.search(lowerCamelCase) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__lowerCAmelCase = [(i, key) for i, key in enumerate(lowerCamelCase) if key is not None]
__lowerCAmelCase = [x[0] for x in sorted(lowerCamelCase, key=lambda lowerCamelCase: x[1])]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__lowerCAmelCase = 0
__lowerCAmelCase = []
for i in range(len(lowerCamelCase)):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i])
else:
__lowerCAmelCase = sort_objects_in_import(internal_blocks[sorted_indices[count]])
reorderded_blocks.append(lowerCamelCase)
count += 1
# And we put our main block back together with its first and last line.
__lowerCAmelCase = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
if code != "\n".join(lowerCamelCase):
if check_only:
return True
else:
print(F"""Overwriting {file}.""")
with open(lowerCamelCase, '''w''', encoding='''utf-8''') as f:
f.write('''\n'''.join(lowerCamelCase))
def __magic_name__( lowerCamelCase=True):
__lowerCAmelCase = []
for root, _, files in os.walk(lowerCamelCase):
if "__init__.py" in files:
__lowerCAmelCase = sort_imports(os.path.join(lowerCamelCase, '''__init__.py'''), check_only=lowerCamelCase)
if result:
__lowerCAmelCase = [os.path.join(lowerCamelCase, '''__init__.py''')]
if len(lowerCamelCase) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase)} files, run `make style`.""")
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_UpperCAmelCase : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 174
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowerCamelCase : Optional[int] = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = 0
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = CLIPConfig()
# Create a dummy config file with image_processor_type
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ ).to_dict()
config_dict.pop('image_processor_type' )
UpperCamelCase = CLIPImageProcessor(**UpperCamelCase__ )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
config.save_pretrained(UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase__ , 'clip-base is not a local folder and is not a valid model identifier' ):
UpperCamelCase = AutoImageProcessor.from_pretrained('clip-base' )
def A ( self : List[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ , revision='aaaaaa' )
def A ( self : List[str] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase__ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCamelCase = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def A ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
def A ( self : Optional[Any] ):
"""simple docstring"""
try:
AutoConfig.register('custom' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(UpperCamelCase__ ) / 'preprocessor_config.json'
UpperCamelCase = Path(UpperCamelCase__ ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(UpperCamelCase__ , 'w' ) , )
json.dump({'model_type': 'clip'} , open(UpperCamelCase__ , 'w' ) )
UpperCamelCase = CustomImageProcessor.from_pretrained(UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
UpperCamelCase = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A ( self : Optional[int] ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = True
try:
AutoConfig.register('custom' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(not hasattr(UpperCamelCase__ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 249
| 0
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowerCAmelCase__ = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
lowerCAmelCase__ = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
lowerCAmelCase__ = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCAmelCase__ ( datasets.Metric):
'''simple docstring'''
def _lowerCamelCase ( self) -> str:
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`.")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Sequence(datasets.Value("string" , id="sequence") , id="references"),
}) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , ) -> int:
_A : Optional[int] = len(references[0])
if any(len(__lowerCamelCase) != references_per_prediction for refs in references):
raise ValueError("Sacrebleu requires the same number of references for each prediction")
_A : int = [[refs[i] for refs in references] for i in range(__lowerCamelCase)]
_A : Tuple = TER(
normalized=__lowerCamelCase , no_punct=__lowerCamelCase , asian_support=__lowerCamelCase , case_sensitive=__lowerCamelCase , )
_A : str = sb_ter.corpus_score(__lowerCamelCase , __lowerCamelCase)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 11
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VQModel
__SCREAMING_SNAKE_CASE = "sample"
@property
def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]:
_A : Optional[int] = 4
_A : Tuple = 3
_A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase)
return {"sample": image}
@property
def _lowerCamelCase ( self) -> int:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self) -> List[Any]:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[Any] = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_A : int = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> Any:
pass
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(__lowerCamelCase)
_A : str = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(__lowerCamelCase).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
_A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
_A : Optional[int] = image.to(__lowerCamelCase)
with torch.no_grad():
_A : List[str] = model(__lowerCamelCase).sample
_A : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3])
# fmt: on
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
| 11
| 1
|
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
A__ : int = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
A__ : str = 'hopper-medium-v2'
A__ : Dict = gym.make(env_name)
A__ : List[Any] = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
A__ : Dict = env.reset()
A__ : Optional[int] = 0
A__ : str = 0
A__ : List[Any] = 1_000
A__ : Tuple = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
A__ : Union[str, Any] = pipeline(obs, planning_horizon=32)
# execute action in environment
A__ , A__ , A__ , A__ : Any = env.step(denorm_actions)
A__ : List[Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
A__ : Optional[Any] = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
| 209
|
"""simple docstring"""
import torch
def _snake_case ( ) -> Union[str, Any]:
if torch.cuda.is_available():
lowerCamelCase_ : int =torch.cuda.device_count()
else:
lowerCamelCase_ : List[str] =0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 209
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str:
_A : Optional[int] = bp_numa
_A : Dict = bp_numa
_A : Tuple = bp_numa
_A : List[str] = conva_get[:2]
_A : Tuple = conva_get[2]
_A : Optional[int] = size_pa
_A : Optional[Any] = rate_w
_A : Optional[Any] = rate_t
_A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Any = -2 * np.random.rand(self.conva[1]) + 1
_A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
_A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# save model dict with pickle
_A : Dict = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb") as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Any:
# read saved model
with open(__lowerCamelCase , "rb") as f:
_A : Any = pickle.load(__lowerCamelCase) # noqa: S301
_A : Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_A : str = model_dic.get("size_pooling1")
_A : List[str] = model_dic.get("num_bp1")
_A : Union[str, Any] = model_dic.get("num_bp2")
_A : List[Any] = model_dic.get("num_bp3")
_A : Dict = model_dic.get("rate_weight")
_A : List[Any] = model_dic.get("rate_thre")
# create model instance
_A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# modify model parameters
_A : List[Any] = model_dic.get("w_conv1")
_A : Union[str, Any] = model_dic.get("wkj")
_A : str = model_dic.get("vji")
_A : List[str] = model_dic.get("thre_conv1")
_A : Optional[Any] = model_dic.get("thre_bp2")
_A : Dict = model_dic.get("thre_bp3")
return conv_ins
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return 1 / (1 + np.exp(-1 * x))
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
return round(__lowerCamelCase , 3)
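# Illustrative: sig is the logistic sigmoid (sig(0) == 0.5, tending to 1 for large
# positive inputs) and do_round keeps three decimals: do_round(0.12345) -> 0.123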
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
# convolution process
_A : Tuple = convs[0]
_A : Union[str, Any] = convs[1]
_A : List[Any] = np.shape(__lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_A : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
_A : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase)
# calculate the feature map of every single kernel, saved as a list of matrices
_A : Optional[Any] = []
_A : Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCamelCase):
_A : Optional[int] = []
for i_focus in range(len(__lowerCamelCase)):
_A : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase))
_A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape(
__lowerCamelCase , __lowerCamelCase)
data_featuremap.append(__lowerCamelCase)
# expand the data slice to one dimension
_A : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase))
_A : Dict = np.asarray(__lowerCamelCase)
return focus_list, data_featuremap
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict:
# pooling process
_A : Optional[Any] = len(featuremaps[0])
_A : str = int(size_map / size_pooling)
_A : Optional[int] = []
for i_map in range(len(__lowerCamelCase)):
_A : int = featuremaps[i_map]
_A : Optional[int] = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase))
_A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase)
featuremap_pooled.append(__lowerCamelCase)
return featuremap_pooled
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
# expand three-dimensional data into a one-dimensional list
_A : Tuple = []
for i in range(len(__lowerCamelCase)):
_A : Union[str, Any] = np.shape(data[i])
_A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1])
_A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase)
_A : Optional[Any] = np.asarray(__lowerCamelCase)
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
# expand a matrix into a one-dimensional list
_A : List[Any] = np.asarray(__lowerCamelCase)
_A : Union[str, Any] = np.shape(__lowerCamelCase)
_A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Dict = []
_A : Any = 0
for i_map in range(__lowerCamelCase):
_A : Union[str, Any] = np.ones((size_map, size_map))
for i in range(0 , __lowerCamelCase , __lowerCamelCase):
for j in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : List[Any] = pd_pool[
i_pool
]
_A : Tuple = i_pool + 1
_A : Optional[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCamelCase)
return pd_all
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]:
# model training
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase)))
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase)))
_A : Tuple = 0
_A : Dict = []
_A : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_A : Union[str, Any] = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_A : str = np.asmatrix(datas_train[p])
_A : Union[str, Any] = np.asarray(datas_teach[p])
_A , _A : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = np.shape(__lowerCamelCase)
_A : List[str] = self._expand(__lowerCamelCase)
_A : Tuple = data_bp_input
_A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa
_A : List[Any] = self.sig(__lowerCamelCase)
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa
_A : List[str] = self.sig(__lowerCamelCase)
# --------------Model Learning ------------------------
# calculate error and gradient---------------
_A : int = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Optional[Any] = np.multiply(
np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji)
_A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A : Dict = pd_conva_pooled.T.getA().tolist()
_A : Optional[Any] = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_A : int = self._expand_mat(pd_conva_all[k_conv])
_A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_A : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# fully connected layer
_A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the summed error for this single image
_A : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A : Any = rp + 1
_A : Dict = error_count / patterns
all_mse.append(__lowerCamelCase)
def draw_error():
_A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCamelCase , "+-")
plt.plot(__lowerCamelCase , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__lowerCamelCase , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
# model prediction
_A : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase)))
for p in range(len(__lowerCamelCase)):
_A : int = np.asmatrix(datas_test[p])
_A , _A : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : str = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = self._expand(__lowerCamelCase)
_A : List[Any] = data_bp_input
_A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_A : int = self.sig(__lowerCamelCase)
_A : int = bp_outa * self.wkj.T - self.thre_bpa
_A : Optional[int] = self.sig(__lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out]
return np.asarray(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# return the image data after the convolution process so we can inspect it
_A : Optional[int] = np.asmatrix(__lowerCamelCase)
_A , _A : Tuple = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 11
| 1
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __UpperCamelCase ( _A , _A=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def __UpperCamelCase ( _A , _A=0 ):
lowerCAmelCase_ = []
for old_item in old_list:
lowerCAmelCase_ = old_item.replace('''in_layers.0''' , '''norm1''' )
lowerCAmelCase_ = new_item.replace('''in_layers.2''' , '''conv1''' )
lowerCAmelCase_ = new_item.replace('''out_layers.0''' , '''norm2''' )
lowerCAmelCase_ = new_item.replace('''out_layers.3''' , '''conv2''' )
lowerCAmelCase_ = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
lowerCAmelCase_ = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
lowerCAmelCase_ = shave_segments(_A , n_shave_prefix_segments=_A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
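# Illustrative: renew_resnet_paths(["in_layers.0.weight"])
# -> [{"old": "in_layers.0.weight", "new": "norm1.weight"}]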
def __UpperCamelCase ( _A , _A=0 ):
lowerCAmelCase_ = []
for old_item in old_list:
lowerCAmelCase_ = old_item
lowerCAmelCase_ = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
lowerCAmelCase_ = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
lowerCAmelCase_ = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
lowerCAmelCase_ = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
lowerCAmelCase_ = shave_segments(_A , n_shave_prefix_segments=_A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def __UpperCamelCase ( _A , _A , _A , _A=None , _A=None , _A=None ):
assert isinstance(_A , _A ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowerCAmelCase_ = old_checkpoint[path]
lowerCAmelCase_ = old_tensor.shape[0] // 3
lowerCAmelCase_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowerCAmelCase_ = old_tensor.shape[0] // config['''num_head_channels'''] // 3
lowerCAmelCase_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = old_tensor.split(channels // num_heads , dim=1 )
lowerCAmelCase_ = query.reshape(_A )
lowerCAmelCase_ = key.reshape(_A )
lowerCAmelCase_ = value.reshape(_A )
for path in paths:
lowerCAmelCase_ = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowerCAmelCase_ = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
lowerCAmelCase_ = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
lowerCAmelCase_ = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
lowerCAmelCase_ = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowerCAmelCase_ = old_checkpoint[path['''old''']][:, :, 0]
else:
lowerCAmelCase_ = old_checkpoint[path['''old''']]
def __UpperCamelCase ( _A , _A ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = checkpoint['''time_embed.0.weight''']
lowerCAmelCase_ = checkpoint['''time_embed.0.bias''']
lowerCAmelCase_ = checkpoint['''time_embed.2.weight''']
lowerCAmelCase_ = checkpoint['''time_embed.2.bias''']
lowerCAmelCase_ = checkpoint['''input_blocks.0.0.weight''']
lowerCAmelCase_ = checkpoint['''input_blocks.0.0.bias''']
lowerCAmelCase_ = checkpoint['''out.0.weight''']
lowerCAmelCase_ = checkpoint['''out.0.bias''']
lowerCAmelCase_ = checkpoint['''out.2.weight''']
lowerCAmelCase_ = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
lowerCAmelCase_ = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
for layer_id in range(_A )
}
# Retrieves the keys for the middle blocks only
lowerCAmelCase_ = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
for layer_id in range(_A )
}
# Retrieves the keys for the output blocks only
lowerCAmelCase_ = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
for layer_id in range(_A )
}
for i in range(1 , _A ):
lowerCAmelCase_ = (i - 1) // (config['''num_res_blocks'''] + 1)
lowerCAmelCase_ = (i - 1) % (config['''num_res_blocks'''] + 1)
lowerCAmelCase_ = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
lowerCAmelCase_ = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
if f"input_blocks.{i}.0.op.weight" in checkpoint:
lowerCAmelCase_ = checkpoint[
f"input_blocks.{i}.0.op.weight"
]
lowerCAmelCase_ = checkpoint[
f"input_blocks.{i}.0.op.bias"
]
continue
lowerCAmelCase_ = renew_resnet_paths(_A )
lowerCAmelCase_ = {'''old''': f"input_blocks.{i}.0", '''new''': f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
lowerCAmelCase_ = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
_A , _A , _A , additional_replacements=[meta_path, resnet_op] , config=_A )
if len(_A ):
lowerCAmelCase_ = renew_attention_paths(_A )
lowerCAmelCase_ = {
'''old''': f"input_blocks.{i}.1",
'''new''': f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
lowerCAmelCase_ = {
f"input_blocks.{i}.1.qkv.bias": {
'''key''': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'''query''': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'''value''': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"input_blocks.{i}.1.qkv.weight": {
'''key''': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'''query''': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'''value''': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
_A , _A , _A , additional_replacements=[meta_path] , attention_paths_to_split=_A , config=_A , )
lowerCAmelCase_ = middle_blocks[0]
lowerCAmelCase_ = middle_blocks[1]
lowerCAmelCase_ = middle_blocks[2]
lowerCAmelCase_ = renew_resnet_paths(_A )
assign_to_checkpoint(_A , _A , _A , config=_A )
lowerCAmelCase_ = renew_resnet_paths(_A )
assign_to_checkpoint(_A , _A , _A , config=_A )
lowerCAmelCase_ = renew_attention_paths(_A )
lowerCAmelCase_ = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
_A , _A , _A , attention_paths_to_split=_A , config=_A )
for i in range(_A ):
lowerCAmelCase_ = i // (config['''num_res_blocks'''] + 1)
lowerCAmelCase_ = i % (config['''num_res_blocks'''] + 1)
lowerCAmelCase_ = [shave_segments(_A , 2 ) for name in output_blocks[i]]
lowerCAmelCase_ = {}
for layer in output_block_layers:
lowerCAmelCase_ , lowerCAmelCase_ = layer.split('''.''' )[0], shave_segments(_A , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(_A )
else:
lowerCAmelCase_ = [layer_name]
if len(_A ) > 1:
lowerCAmelCase_ = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
lowerCAmelCase_ = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
lowerCAmelCase_ = renew_resnet_paths(_A )
lowerCAmelCase_ = renew_resnet_paths(_A )
lowerCAmelCase_ = {'''old''': f"output_blocks.{i}.0", '''new''': f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(_A , _A , _A , additional_replacements=[meta_path] , config=_A )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowerCAmelCase_ = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
lowerCAmelCase_ = checkpoint[
f"output_blocks.{i}.{index}.conv.weight"
]
lowerCAmelCase_ = checkpoint[
f"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(_A ) == 2:
lowerCAmelCase_ = []
if len(_A ):
lowerCAmelCase_ = renew_attention_paths(_A )
lowerCAmelCase_ = {
'''old''': f"output_blocks.{i}.1",
'''new''': f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
lowerCAmelCase_ = {
f"output_blocks.{i}.1.qkv.bias": {
'''key''': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'''query''': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'''value''': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"output_blocks.{i}.1.qkv.weight": {
'''key''': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'''query''': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'''value''': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
_A , _A , _A , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=_A , )
else:
lowerCAmelCase_ = renew_resnet_paths(_A , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowerCAmelCase_ = '''.'''.join(['''output_blocks''', str(_A ), path['''old''']] )
lowerCAmelCase_ = '''.'''.join(['''up_blocks''', str(_A ), '''resnets''', str(_A ), path['''new''']] )
lowerCAmelCase_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
_A = parser.parse_args()
_A = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_A = json.loads(f.read())
_A = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_A = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_A = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
_A = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
_A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
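
# A minimal sketch of the fused-qkv split performed by assign_to_checkpoint
# above: reshape the stacked projection into per-head groups, split into
# query/key/value, then flatten back. All sizes below are invented for the
# demo; real checkpoints supply num_head_channels via the config.
import torch

num_head_channels = 8
old_qkv = torch.randn(96, 32)  # stacked q/k/v projection: 3 * channels rows
channels = old_qkv.shape[0] // 3
num_heads = old_qkv.shape[0] // num_head_channels // 3
grouped = old_qkv.reshape((num_heads, 3 * channels // num_heads) + old_qkv.shape[1:])
query, key, value = grouped.split(channels // num_heads, dim=1)
print(query.reshape(-1, old_qkv.shape[1]).shape)  # torch.Size([32, 32])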
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class A ( __UpperCAmelCase ):
__snake_case = ['pixel_values']
def __init__( self, UpperCamelCase__ = True, UpperCamelCase__ = 32, UpperCamelCase__=PILImageResampling.BILINEAR, UpperCamelCase__ = True, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = size_divisor
lowerCAmelCase_ = resample
super().__init__(**UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(UpperCamelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
lowerCAmelCase_ = height // size_divisor * size_divisor
lowerCAmelCase_ = width // size_divisor * size_divisor
lowerCAmelCase_ = resize(UpperCamelCase__, (new_h, new_w), resample=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__ )
return image
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__ ):
"""simple docstring"""
return rescale(image=UpperCamelCase__, scale=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=None, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = ChannelDimension.FIRST, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = size_divisor if size_divisor is not None else self.size_divisor
lowerCAmelCase_ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
lowerCAmelCase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(UpperCamelCase__ ) for img in images]
if do_resize:
lowerCAmelCase_ = [self.resize(UpperCamelCase__, size_divisor=UpperCamelCase__, resample=UpperCamelCase__ ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(UpperCamelCase__, scale=1 / 255 ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(UpperCamelCase__, UpperCamelCase__ ) for image in images]
lowerCAmelCase_ = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__, tensor_type=UpperCamelCase__ )
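
# A minimal sketch of the size_divisor rounding the resize step above applies:
# both spatial dimensions are floored to the nearest multiple of the divisor.
# The example numbers are arbitrary.
def round_down_to_multiple(height: int, width: int, size_divisor: int) -> tuple:
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor


print(round_down_to_multiple(1023, 767, 32))  # (992, 736)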
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A_ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : List[str] , snake_case__ : str=None , snake_case__ : Optional[int]=None , snake_case__ : List[Any]=None , snake_case__ : int=None , snake_case__ : Union[str, Any]=None , snake_case__ : Tuple=None , ):
"""simple docstring"""
if attention_mask is None:
_snake_case : Dict = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_snake_case : Tuple = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_snake_case : Tuple = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : Dict = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_snake_case : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowercase:
'''simple docstring'''
def __init__( self: Dict, a_: Any, a_: str=13, a_: int=7, a_: Any=True, a_: Union[str, Any]=False, a_: str=99, a_: List[Any]=16, a_: Optional[Any]=2, a_: int=4, a_: int=4, a_: Dict="gelu", a_: int=0.1, a_: Dict=0.1, a_: Tuple=32, a_: Optional[int]=2, a_: Any=1, a_: List[str]=0, a_: Union[str, Any]=0.02, ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Tuple = batch_size
_snake_case : int = seq_length
_snake_case : Optional[int] = is_training
_snake_case : Tuple = use_labels
_snake_case : List[str] = vocab_size
_snake_case : Dict = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Tuple = num_attention_heads
_snake_case : Optional[int] = intermediate_size
_snake_case : Any = hidden_act
_snake_case : Optional[int] = hidden_dropout_prob
_snake_case : List[Any] = attention_probs_dropout_prob
_snake_case : Tuple = max_position_embeddings
_snake_case : Any = eos_token_id
_snake_case : List[str] = pad_token_id
_snake_case : Dict = bos_token_id
_snake_case : Tuple = initializer_range
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ), 3, self.vocab_size )
_snake_case : Tuple = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.intaa )), -1 )
_snake_case : List[str] = shift_tokens_right(a_, 1, 2 )
_snake_case : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=a_, )
_snake_case : Tuple = prepare_blenderbot_inputs_dict(a_, a_, a_ )
return config, inputs_dict
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any], a_: Any, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = 20
_snake_case : List[str] = model_class_name(a_ )
_snake_case : Optional[Any] = model.encode(inputs_dict["""input_ids"""] )
_snake_case , _snake_case : Optional[Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_snake_case : Tuple = model.init_cache(decoder_input_ids.shape[0], a_, a_ )
_snake_case : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="""i4""" )
_snake_case : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
_snake_case : List[str] = model.decode(
decoder_input_ids[:, :-1], a_, decoder_attention_mask=a_, past_key_values=a_, decoder_position_ids=a_, )
_snake_case : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="""i4""" )
_snake_case : Tuple = model.decode(
decoder_input_ids[:, -1:], a_, decoder_attention_mask=a_, past_key_values=outputs_cache.past_key_values, decoder_position_ids=a_, )
_snake_case : List[str] = model.decode(a_, a_ )
_snake_case : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=f"Max diff is {diff}" )
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any], a_: List[Any], a_: Any ):
'''simple docstring'''
_snake_case : Dict = 20
_snake_case : Optional[int] = model_class_name(a_ )
_snake_case : Optional[Any] = model.encode(inputs_dict["""input_ids"""] )
_snake_case , _snake_case : Dict = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_snake_case : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
], axis=-1, )
_snake_case : List[Any] = model.init_cache(decoder_input_ids.shape[0], a_, a_ )
_snake_case : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
_snake_case : str = model.decode(
decoder_input_ids[:, :-1], a_, decoder_attention_mask=a_, past_key_values=a_, decoder_position_ids=a_, )
_snake_case : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="""i4""" )
_snake_case : Optional[int] = model.decode(
decoder_input_ids[:, -1:], a_, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=a_, decoder_position_ids=a_, )
_snake_case : Optional[int] = model.decode(a_, a_, decoder_attention_mask=a_ )
_snake_case : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=f"Max diff is {diff}" )
@require_flax
class lowercase( unittest.TestCase ):
'''simple docstring'''
lowercase__ = 99
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
], dtype=np.intaa, )
_snake_case : Any = input_ids.shape[0]
_snake_case : int = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : Dict = self._get_config_and_data()
_snake_case : List[str] = FlaxBlenderbotForConditionalGeneration(a_ )
_snake_case : str = lm_model(input_ids=a_ )
_snake_case : Optional[int] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
_snake_case : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(a_ )
_snake_case : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.intaa )
_snake_case : Dict = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.intaa )
_snake_case : Optional[Any] = lm_model(input_ids=a_, decoder_input_ids=a_ )
_snake_case : List[str] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape, a_ )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : str = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.intaa )
_snake_case : List[str] = shift_tokens_right(a_, 1, 2 )
_snake_case : int = np.equal(a_, 1 ).astype(np.floataa ).sum()
_snake_case : Optional[int] = np.equal(a_, 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape, input_ids.shape )
self.assertEqual(a_, n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0], 2 ).all() )
@require_flax
class lowercase( __a , unittest.TestCase , __a ):
'''simple docstring'''
lowercase__ = True
lowercase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = FlaxBlenderbotModelTester(self )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(a_, a_, a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(a_, a_, a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_snake_case : List[Any] = self._prepare_for_class(a_, a_ )
_snake_case : List[Any] = model_class(a_ )
@jax.jit
def encode_jitted(a_: str, a_: Optional[Any]=None, **a_: List[str] ):
return model.encode(input_ids=a_, attention_mask=a_ )
with self.subTest("""JIT Enabled""" ):
_snake_case : Optional[Any] = encode_jitted(**a_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_snake_case : int = encode_jitted(**a_ ).to_tuple()
self.assertEqual(len(a_ ), len(a_ ) )
for jitted_output, output in zip(a_, a_ ):
self.assertEqual(jitted_output.shape, output.shape )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_snake_case : int = model_class(a_ )
_snake_case : int = model.encode(inputs_dict["""input_ids"""], inputs_dict["""attention_mask"""] )
_snake_case : Optional[Any] = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(a_: Any, a_: Tuple, a_: int ):
return model.decode(
decoder_input_ids=a_, decoder_attention_mask=a_, encoder_outputs=a_, )
with self.subTest("""JIT Enabled""" ):
_snake_case : Any = decode_jitted(**a_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_snake_case : List[str] = decode_jitted(**a_ ).to_tuple()
self.assertEqual(len(a_ ), len(a_ ) )
for jitted_output, output in zip(a_, a_ ):
self.assertEqual(jitted_output.shape, output.shape )
@slow
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_snake_case : List[Any] = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_snake_case : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
_snake_case : Union[str, Any] = model(a_ )
self.assertIsNotNone(a_ )
@unittest.skipUnless(jax_device != """cpu""", """3B test too slow on CPU.""" )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
_snake_case : str = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
_snake_case : List[str] = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""", from_pt=a_ )
_snake_case : Union[str, Any] = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" )
_snake_case : str = ["""Sam"""]
_snake_case : Tuple = tokenizer(a_, return_tensors="""jax""" )
_snake_case : str = model.generate(**a_, **a_ )
_snake_case : List[Any] = """Sam is a great name. It means \"sun\" in Gaelic."""
_snake_case : int = tokenizer.batch_decode(a_, **a_ )
assert generated_txt[0].strip() == tgt_text
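
# A minimal numpy sketch (an assumed simplification) of the shift_tokens_right
# semantics the tests above rely on: drop the last column, shift one step to
# the right, and write the decoder start token into column 0. The library
# version may additionally map masked (-100) positions to the pad token.
import numpy as np


def shift_tokens_right_np(input_ids: np.ndarray, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return shifted


print(shift_tokens_right_np(np.array([[71, 82, 18, 33, 2, 1, 1]]), 2))
# [[ 2 71 82 18 33  2  1]]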
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str]=False ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_snake_case : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict , snake_case__ : List[str]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case : List[Any] = """"""
else:
_snake_case : List[Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
_snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
_snake_case : Union[str, Any] = in_proj_bias[: config.hidden_size]
_snake_case : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
_snake_case : List[str] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = dct.pop(snake_case__ )
_snake_case : Union[str, Any] = val
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = ViTMSNConfig()
_snake_case : Any = 10_00
_snake_case : Tuple = """datasets/huggingface/label-files"""
_snake_case : Dict = """imagenet-1k-id2label.json"""
_snake_case : int = json.load(open(hf_hub_download(snake_case__ , snake_case__ ) , """r""" ) )
_snake_case : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : List[Any] = idalabel
_snake_case : str = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
_snake_case : Tuple = 3_84
_snake_case : Dict = 15_36
_snake_case : Tuple = 6
elif "l16" in checkpoint_url:
_snake_case : Any = 10_24
_snake_case : int = 40_96
_snake_case : str = 24
_snake_case : Optional[int] = 16
_snake_case : List[Any] = 0.1
elif "b4" in checkpoint_url:
_snake_case : Tuple = 4
elif "l7" in checkpoint_url:
_snake_case : int = 7
_snake_case : Dict = 10_24
_snake_case : Optional[Any] = 40_96
_snake_case : Any = 24
_snake_case : Union[str, Any] = 16
_snake_case : Optional[int] = 0.1
_snake_case : int = ViTMSNModel(snake_case__ )
_snake_case : Optional[int] = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" )["""target_encoder"""]
_snake_case : List[str] = ViTImageProcessor(size=config.image_size )
remove_projection_head(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , base_model=snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
_snake_case : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Tuple = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
_snake_case : str = ViTImageProcessor(
size=config.image_size , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : Any = image_processor(images=snake_case__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
_snake_case : int = model(**snake_case__ )
_snake_case : List[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
_snake_case : Optional[Any] = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
_snake_case : str = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
elif "l16" in checkpoint_url:
_snake_case : Optional[int] = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
elif "b4" in checkpoint_url:
_snake_case : List[Any] = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
_snake_case : Optional[int] = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , snake_case__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
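
# A minimal sketch of the rename-keys pattern the conversion above uses: each
# (old, new) pair pops an entry out of the state dict and reinserts it under
# the HF name, leaving the tensor untouched. The values here are dummies.
def rename_key_demo(state_dict: dict, old: str, new: str) -> None:
    state_dict[new] = state_dict.pop(old)


sd = {"module.cls_token": 0, "module.pos_embed": 1}
for src, dest in [
    ("module.cls_token", "vit.embeddings.cls_token"),
    ("module.pos_embed", "vit.embeddings.position_embeddings"),
]:
    rename_key_demo(sd, src, dest)
print(sorted(sd))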
lowerCAmelCase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''albert'''
def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= inner_group_num
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= classifier_dropout_prob
__lowercase= position_embedding_type
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f'{solution() = }')
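# Hand-checkable sanity test: the even Fibonacci terms not exceeding 100
# are 2, 8 and 34, so solution(100) should return 44.
assert solution(100) == 44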
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
a_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
UpperCamelCase =1_00_00
UpperCamelCase =None
UpperCamelCase =None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
UpperCamelCase =ParquetConfig
def _lowerCamelCase ( self ) -> List[str]:
return datasets.DatasetInfo(features=self.config.features )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__lowercase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase_ , (str, list, tuple) ):
__lowercase : str = data_files
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowercase : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowercase : int = [dl_manager.iter_files(UpperCamelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__lowercase : int = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowercase : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowercase : Any = [dl_manager.iter_files(UpperCamelCase_ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase_ ):
with open(UpperCamelCase_ , '''rb''' ) as f:
__lowercase : Any = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase_ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase_ , gen_kwargs={'''files''': files} ) )
return splits
def _lowerCamelCase ( self , UpperCamelCase_ ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__lowercase : Tuple = table_cast(UpperCamelCase_ , self.info.features.arrow_schema )
return pa_table
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple:
__lowercase : Union[str, Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase_ ) ):
with open(UpperCamelCase_ , '''rb''' ) as f:
__lowercase : Union[str, Any] = pq.ParquetFile(UpperCamelCase_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__lowercase : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase_ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase_ )}: {e}""" )
raise
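
# A minimal standalone sketch of the read loop in the builder above: write a
# tiny parquet file, then stream it back in record batches with pyarrow, as
# the table generator does. The file name and batch size are arbitrary demo
# values.
import pyarrow as pa
import pyarrow.parquet as pq

pq.write_table(pa.table({"x": list(range(10))}), "demo.parquet")
parquet_file = pq.ParquetFile("demo.parquet")
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=4)):
    pa_table = pa.Table.from_batches([record_batch])
    print(batch_idx, pa_table.num_rows)  # 0 4 / 1 4 / 2 2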
class Graph:
    def __init__(self) -> None:
        self.vertex: dict = {}

    def print_graph(self) -> None:
        # print the adjacency list, one source vertex per line
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array to track which vertices have been printed
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
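
# An alternative sketch (not from the original file): recursive DFS can hit
# Python's recursion limit on deep graphs, so here is an explicit-stack
# version written against the same adjacency-dict shape. On the demo graph
# above it prints the same order, 0 1 2 3.
def dfs_iterative(vertex: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours reversed so the lowest-numbered one is popped first
        stack.extend(reversed(vertex.get(node, [])))
    return order


print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]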
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
url = input('Enter Video/IGTV url: ').strip()
file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = XLMTokenizer
lowerCAmelCase_ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowerCamelCase__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowerCamelCase__ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = '''lower newer'''
lowerCamelCase__ = '''lower newer'''
return input_text, output_text
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = XLMTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase__ = '''lower'''
lowerCamelCase__ = ['''low''', '''er</w>''']
lowerCamelCase__ = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = tokens + ['''<unk>''']
lowerCamelCase__ = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
lowerCamelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
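
# A didactic sketch (not the XLMTokenizer algorithm verbatim) of how the BPE
# merges encoded in the fixture above turn the characters of "lower" into the
# ["low", "er</w>"] pieces the test asserts: ranked merge rules are applied
# over the symbol sequence until none match.
def apply_bpe(symbols: list, merges: list) -> list:
    for a, b in merges:  # merges ordered by priority, as in the merges file
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols


word = ["l", "o", "w", "e", "r</w>"]  # "lower" with an end-of-word marker
print(apply_bpe(word, [("l", "o"), ("lo", "w"), ("e", "r</w>")]))  # ['low', 'er</w>']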
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_a = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
lowerCamelCase__ = getattr(__snake_case ,__snake_case )
if weight_type is not None:
lowerCamelCase__ = getattr(__snake_case ,__snake_case ).shape
else:
lowerCamelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowerCamelCase__ = value
elif weight_type == "weight_g":
lowerCamelCase__ = value
elif weight_type == "weight_v":
lowerCamelCase__ = value
elif weight_type == "bias":
lowerCamelCase__ = value
else:
lowerCamelCase__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = fairseq_model.state_dict()
lowerCamelCase__ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowerCamelCase__ = None
for name, value in fairseq_dict.items():
lowerCamelCase__ = False
if "conv_layers" in name:
load_conv_layer(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,hf_model.config.feat_extract_norm == '''group''' ,)
lowerCamelCase__ = True
elif name.split('''.''' )[0] == "proj":
lowerCamelCase__ = fairseq_model.proj
lowerCamelCase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCamelCase__ = True
if "*" in mapped_key:
lowerCamelCase__ = name.split(__snake_case )[0].split('''.''' )[-2]
lowerCamelCase__ = mapped_key.replace('''*''' ,__snake_case )
if "weight_g" in name:
lowerCamelCase__ = '''weight_g'''
elif "weight_v" in name:
lowerCamelCase__ = '''weight_v'''
elif "bias" in name:
lowerCamelCase__ = '''bias'''
elif "weight" in name:
lowerCamelCase__ = '''weight'''
else:
lowerCamelCase__ = None
set_recursively(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'Unused weights: {unused_weights}' )
return proj_weight
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = full_name.split('''conv_layers.''' )[-1]
lowerCamelCase__ = name.split('''.''' )
lowerCamelCase__ = int(items[0] )
lowerCamelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowerCamelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowerCamelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowerCamelCase__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowerCamelCase__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape
lowerCamelCase__ = nn.Linear(__snake_case ,__snake_case ,bias=__snake_case )
lowerCamelCase__ = emb.weight.data
return lin_layer
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
with open(__snake_case ,'''r''' ,encoding='''utf-8''' ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = [line.split(''' ''' )[0] for line in lines]
lowerCamelCase__ = len(__snake_case )
lowerCamelCase__ = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(__snake_case ,range(4 ,num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = WavaVecaConfig.from_pretrained(__snake_case )
lowerCamelCase__ = SpeechaTextaConfig.from_pretrained(
__snake_case ,vocab_size=__snake_case ,decoder_layers=__snake_case ,do_stable_layer_norm=__snake_case )
lowerCamelCase__ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=__snake_case ,return_attention_mask=__snake_case ,)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
lowerCamelCase__ = model[0].eval()
# set weights for wav2vec2 encoder
lowerCamelCase__ = WavaVecaModel(__snake_case )
lowerCamelCase__ = recursively_load_weights_wavaveca(model.encoder ,__snake_case )
lowerCamelCase__ = SpeechaTextaForCausalLM(__snake_case )
lowerCamelCase__ , lowerCamelCase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__snake_case )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
lowerCamelCase__ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
lowerCamelCase__ = SpeechEncoderDecoderModel(encoder=__snake_case ,decoder=__snake_case )
lowerCamelCase__ = False
# add projection layer
lowerCamelCase__ = nn.Parameter(projection_layer.weight )
lowerCamelCase__ = nn.Parameter(projection_layer.bias )
lowerCamelCase__ = create_vocab_dict(__snake_case )
with open(os.path.join(__snake_case ,'''vocab.json''' ) ,'''w''' ) as fp:
json.dump(__snake_case ,__snake_case )
lowerCamelCase__ = SpeechaTextaTokenizer(os.path.join(__snake_case ,'''vocab.json''' ) )
tokenizer.save_pretrained(__snake_case )
lowerCamelCase__ = hf_wavavec.config.to_dict()
lowerCamelCase__ = tokenizer.pad_token_id
lowerCamelCase__ = tokenizer.bos_token_id
lowerCamelCase__ = tokenizer.eos_token_id
lowerCamelCase__ = '''speech_to_text_2'''
lowerCamelCase__ = '''wav2vec2'''
lowerCamelCase__ = SpeechEncoderDecoderConfig.from_dict(__snake_case )
hf_wavavec.save_pretrained(__snake_case )
feature_extractor.save_pretrained(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
_a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
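
# A minimal sketch of the weight-tying idea behind the embedding-to-linear
# helper defined above: a bias-free Linear sharing the embedding matrix makes
# output logits dot products against the embedding rows. Dimensions are
# arbitrary demo values.
import torch
from torch import nn

emb = nn.Embedding(10, 4)  # vocab_size x hidden
vocab_size, emb_size = emb.weight.shape
lin = nn.Linear(emb_size, vocab_size, bias=False)
lin.weight.data = emb.weight.data
hidden = torch.randn(1, emb_size)
assert torch.allclose(lin(hidden), hidden @ emb.weight.T)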
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowerCAmelCase : Dict = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def A_( A : Optional[Any] , A : Optional[Any] , A : Optional[int] , A : List[Any] , A : List[Any] , A : Optional[Any]):
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''')
if not ops[op](version.parse(A) , version.parse(A)):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''')
def A_( A : str , A : Optional[str] = None):
UpperCamelCase = f'''\n{hint}''' if hint is not None else ''
# non-versioned check
if re.match(r'^[\w_\-\d]+$' , A):
UpperCamelCase , UpperCamelCase , UpperCamelCase = requirement, None, None
else:
UpperCamelCase = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , A)
if not match:
raise ValueError(
            'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'
f''' got {requirement}''')
UpperCamelCase , UpperCamelCase = match[0]
UpperCamelCase = want_full.split(',') # there could be multiple requirements
UpperCamelCase = {}
for w in want_range:
UpperCamelCase = re.findall(r'^([\s!=<>]{1,2})(.+)' , A)
if not match:
raise ValueError(
                'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'
f''' but got {requirement}''')
UpperCamelCase , UpperCamelCase = match[0]
UpperCamelCase = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys())}, but got {op}''')
# special case
if pkg == "python":
        UpperCamelCase = '.'.join([str(x) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(A , A , A , A , A , A)
return
# check if any version is installed
try:
UpperCamelCase = importlib.metadata.version(A)
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''')
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(A , A , A , A , A , A)
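# Minimal usage sketch (illustrative; in the original utils module the helper above is the
# public require_version function, and the requirement strings below are assumptions):
# require_version("numpy")                               # presence check only
# require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")   # multiple version constraints
# require_version("python>=3.7")                         # special-cased interpreter check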
def A_( A : int):
UpperCamelCase = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(A , A)
| 251
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : str = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase : Union[str, Any] = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
lowerCAmelCase : List[str] = {
'moussaKam/mbarthez': 10_24,
'moussaKam/barthez': 10_24,
'moussaKam/barthez-orangesum-title': 10_24,
}
lowerCAmelCase : Dict = '▁'
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , )-> None:
'''simple docstring'''
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase = len(self.sp_model ) - 1
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
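        # Layout produced above (BARThez follows the RoBERTa-style scheme):
        #   single sequence:    <s> A </s>
        #   pair of sequences:  <s> A </s></s> B </s>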
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = False )-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[int]:
'''simple docstring'''
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
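        # Note: like other RoBERTa-derived tokenizers, BARThez does not use token type ids,
        # so the mask above is all zeros for single sequences and pairs alike.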
@property
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self , A_ )-> List[str]:
'''simple docstring'''
return self.sp_model.encode(A_ , out_type=A_ )
def UpperCAmelCase_ ( self , A_ )-> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(A_ )
return spm_id if spm_id else self.unk_token_id
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(A_ )
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = ''
UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
UpperCamelCase = True
UpperCamelCase = []
else:
current_sub_tokens.append(A_ )
UpperCamelCase = False
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def __getstate__( self )-> int:
'''simple docstring'''
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self , A_ , A_ = None )-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
| 251
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Dict = """luke"""
def __init__( self : int , _lowerCamelCase : Dict=5_02_67 , _lowerCamelCase : Tuple=50_00_00 , _lowerCamelCase : Any=7_68 , _lowerCamelCase : List[str]=2_56 , _lowerCamelCase : int=12 , _lowerCamelCase : int=12 , _lowerCamelCase : Optional[Any]=30_72 , _lowerCamelCase : Any="gelu" , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : int=5_12 , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : int=1E-12 , _lowerCamelCase : Any=True , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=0 , _lowerCamelCase : List[Any]=2 , **_lowerCamelCase : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
A_ : Union[str, Any] = vocab_size
A_ : Any = entity_vocab_size
A_ : List[Any] = hidden_size
A_ : List[str] = entity_emb_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : Optional[int] = hidden_act
A_ : List[Any] = intermediate_size
A_ : str = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : List[str] = initializer_range
A_ : Any = layer_norm_eps
A_ : List[Any] = use_entity_aware_attention
A_ : Optional[Any] = classifier_dropout
| 167
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase ( unittest.TestCase):
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Tuple = tempfile.mkdtemp()
# fmt: off
A_ : List[Any] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
A_ : Tuple = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
A_ : Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
A_ : Tuple = {'''unk_token''': '''<unk>'''}
A_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
A_ : str = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
A_ : str = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def a_ ( self : Any , **_lowerCamelCase : Dict ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def a_ ( self : Dict , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def a_ ( self : List[str] , **_lowerCamelCase : List[Any] ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a_ ( self : List[str] ):
"""simple docstring"""
        A_ : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
A_ : Dict = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : int = self.get_tokenizer()
A_ : int = self.get_rust_tokenizer()
A_ : Optional[Any] = self.get_image_processor()
A_ : Union[str, Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Optional[Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def a_ ( self : str ):
"""simple docstring"""
A_ : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Dict = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
A_ : List[str] = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Tuple = self.prepare_image_inputs()
A_ : Dict = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : Optional[int] = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a_ ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : int = self.get_tokenizer()
A_ : int = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = '''lower newer'''
A_ : int = processor(text=_lowerCamelCase )
A_ : Any = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a_ ( self : str ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[Any] = self.get_tokenizer()
A_ : Tuple = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = '''lower newer'''
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Dict = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : int = self.get_tokenizer()
A_ : Any = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Tuple = self.prepare_image_inputs()
A_ : Tuple = self.prepare_image_inputs()
A_ : Optional[int] = processor(images=_lowerCamelCase , visual_prompt=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Optional[Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : List[str] = processor.batch_decode(_lowerCamelCase )
A_ : str = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
| 167
| 1
|
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # a string of zero length matches a pattern of zero length
    dp[0][0] = 1
    # a pattern of zero length will never match a string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # a string of zero length matches patterns such as a*, a*b*, ... where every
    # other character is *
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
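# Worked example (matches the demo below): input_string = "aab", pattern = "c*a*b".
# "c*" matches the empty string, "a*" matches "aa" and "b" matches "b",
# so dp[-1][-1] == 1 and the function returns True.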
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
| 368
|
from typing import Any
class Node:
    def __init__(self, data: Any):
        '''simple docstring'''
        self.data = data
        self.next = None
class LinkedList:
    def __init__(self):
        '''simple docstring'''
        self.head = None
    def print_list(self):
        '''simple docstring'''
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()
    def push(self, new_data: Any):
        '''simple docstring'''
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node
    def swap_nodes(self, node_data_a, node_data_b):
        '''simple docstring'''
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
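# Expected output (assuming push() inserts at the head, as reconstructed above):
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5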
| 189
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowerCAmelCase__ = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 'facebook/nllb-200-distilled-600M'
SCREAMING_SNAKE_CASE : Dict = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
SCREAMING_SNAKE_CASE : Union[str, Any] = 'translator'
SCREAMING_SNAKE_CASE : str = AutoTokenizer
SCREAMING_SNAKE_CASE : List[str] = AutoModelForSeqaSeqLM
SCREAMING_SNAKE_CASE : Union[str, Any] = LANGUAGE_CODES
SCREAMING_SNAKE_CASE : Union[str, Any] = ['text', 'text', 'text']
SCREAMING_SNAKE_CASE : Tuple = ['text']
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(F"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='''pt''', src_lang=src_lang, tgt_lang=tgt_lang
        )
    def forward(self, inputs):
        return self.model.generate(**inputs)
    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
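# Usage sketch (illustrative; keeps the obfuscated class name defined above, and the
# __call__ interface comes from PipelineTool, which routes through encode/forward/decode):
# translator = lowercase_()
# translator("Quel temps fait-il ?", src_lang="French", tgt_lang="English")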
| 104
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCamelCase : Optional[int] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_UpperCamelCase : str = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_UpperCamelCase : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCamelCase : List[str] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def __UpperCAmelCase ( A : Optional[int] ) -> int:
UpperCAmelCase_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A )
return [m.group(0 ) for m in matches]
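# Example (illustrative): applied to "TFBertModel", the regex above splits on case
# boundaries and yields ["TF", "Bert", "Model"].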
def __UpperCAmelCase ( ) -> str:
UpperCAmelCase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase_ : Optional[Any] = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
UpperCAmelCase_ : Dict = collections.defaultdict(A )
UpperCAmelCase_ : str = collections.defaultdict(A )
UpperCAmelCase_ : int = collections.defaultdict(A )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(A ):
UpperCAmelCase_ : int = None
if _re_tf_models.match(A ) is not None:
UpperCAmelCase_ : Optional[Any] = tf_models
UpperCAmelCase_ : Optional[int] = _re_tf_models.match(A ).groups()[0]
elif _re_flax_models.match(A ) is not None:
UpperCAmelCase_ : int = flax_models
UpperCAmelCase_ : Any = _re_flax_models.match(A ).groups()[0]
elif _re_pt_models.match(A ) is not None:
UpperCAmelCase_ : Union[str, Any] = pt_models
UpperCAmelCase_ : List[Any] = _re_pt_models.match(A ).groups()[0]
if lookup_dict is not None:
while len(A ) > 0:
if attr_name in model_prefix_to_model_type:
UpperCAmelCase_ : Optional[int] = True
break
# Try again after removing the last word in the name
UpperCAmelCase_ : List[Any] = ''''''.join(camel_case_split(A )[:-1] )
UpperCAmelCase_ : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
UpperCAmelCase_ : List[Any] = list(A )
all_models.sort()
UpperCAmelCase_ : Dict = {'''model_type''': all_models}
UpperCAmelCase_ : Tuple = [pt_models[t] for t in all_models]
UpperCAmelCase_ : Dict = [tf_models[t] for t in all_models]
UpperCAmelCase_ : Optional[int] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
UpperCAmelCase_ : int = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
UpperCAmelCase_ : Any = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
UpperCAmelCase_ : Union[str, Any] = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
UpperCAmelCase_ : int = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
UpperCAmelCase_ : Dict = '''AutoTokenizer'''
UpperCAmelCase_ : str = [processors[t] for t in all_models]
return pd.DataFrame(A )
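# The frame built above has one row per model type; in the original script the columns
# are model_type, pytorch, tensorflow, flax and a processor column (column names are
# assumptions here, since the obfuscated assignments no longer show the dict keys).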
def __UpperCAmelCase ( A : Optional[int] ) -> str:
UpperCAmelCase_ : int = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
UpperCAmelCase_ : Tuple = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
UpperCAmelCase_ : Tuple = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(A , A , A ):
# The type of pipeline may not exist in this framework
if not hasattr(A , A ):
continue
# First extract all model_names
UpperCAmelCase_ : List[str] = []
for name in getattr(A , A ).values():
if isinstance(A , A ):
model_names.append(A )
else:
model_names.extend(list(A ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __UpperCAmelCase ( A : int , A : Any ) -> Tuple:
UpperCAmelCase_ : Tuple = get_frameworks_table()
UpperCAmelCase_ : Any = Dataset.from_pandas(A )
UpperCAmelCase_ : str = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A )
UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A )
UpperCAmelCase_ : Optional[int] = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(A ) )
}
UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() )
UpperCAmelCase_ : Optional[Any] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
UpperCAmelCase_ : Dict = Dataset.from_pandas(A )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) )
if commit_sha is not None:
UpperCAmelCase_ : List[str] = (
F"Update with commit {commit_sha}\n\nSee: "
F"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
UpperCAmelCase_ : int = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=A , repo_type='''dataset''' , token=A , commit_message=A , )
def __UpperCAmelCase ( ) -> int:
UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
UpperCAmelCase_ : List[str] = []
for key in pipeline_tasks:
if key not in in_table:
UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt''']
if isinstance(A , (list, tuple) ):
UpperCAmelCase_ : Dict = model[0]
UpperCAmelCase_ : Any = model.__name__
if model not in in_table.values():
missing.append(A )
if len(A ) > 0:
UpperCAmelCase_ : List[Any] = ''', '''.join(A )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_UpperCamelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 304
| 0
|
"""simple docstring"""
import torch
from transformers import AutoModel
class A_ ( torch.nn.Module ):
'''simple docstring'''
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state
    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)
    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))
    def forward(self, W_query, W_supports):
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
| 362
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """owlvit_text_model"""
def __init__( self: Optional[int] , a: Dict=4_9408 , a: Optional[Any]=512 , a: Dict=2048 , a: Optional[Any]=12 , a: Tuple=8 , a: Union[str, Any]=16 , a: str="quick_gelu" , a: List[Any]=1e-5 , a: Dict=0.0 , a: Optional[int]=0.0_2 , a: Dict=1.0 , a: Any=0 , a: Union[str, Any]=4_9406 , a: Any=4_9407 , **a: Dict , ):
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
__lowerCamelCase : List[Any] = vocab_size
__lowerCamelCase : int = hidden_size
__lowerCamelCase : Optional[Any] = intermediate_size
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : Any = num_attention_heads
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : List[str] = layer_norm_eps
__lowerCamelCase : Tuple = attention_dropout
__lowerCamelCase : Optional[int] = initializer_range
__lowerCamelCase : Tuple = initializer_factor
@classmethod
def _snake_case ( cls: Dict , a: Union[str, os.PathLike] , **a: Optional[int] ):
cls._set_token_in_kwargs(a )
__lowerCamelCase , __lowerCamelCase : Dict = cls.get_config_dict(a , **a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
__lowerCamelCase : Optional[Any] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a , **a )
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """owlvit_vision_model"""
def __init__( self: int , a: Tuple=768 , a: int=3072 , a: List[str]=12 , a: Optional[Any]=12 , a: Optional[int]=3 , a: Optional[int]=768 , a: Optional[Any]=32 , a: Optional[int]="quick_gelu" , a: Union[str, Any]=1e-5 , a: Union[str, Any]=0.0 , a: Union[str, Any]=0.0_2 , a: int=1.0 , **a: Union[str, Any] , ):
super().__init__(**a )
__lowerCamelCase : str = hidden_size
__lowerCamelCase : Tuple = intermediate_size
__lowerCamelCase : Dict = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : int = num_channels
__lowerCamelCase : Optional[Any] = image_size
__lowerCamelCase : Tuple = patch_size
__lowerCamelCase : List[str] = hidden_act
__lowerCamelCase : Tuple = layer_norm_eps
__lowerCamelCase : List[Any] = attention_dropout
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : List[Any] = initializer_factor
@classmethod
def _snake_case ( cls: Optional[int] , a: Union[str, os.PathLike] , **a: int ):
cls._set_token_in_kwargs(a )
__lowerCamelCase , __lowerCamelCase : Dict = cls.get_config_dict(a , **a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
__lowerCamelCase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a , **a )
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """owlvit"""
__snake_case = True
def __init__( self: Dict , a: int=None , a: str=None , a: Tuple=512 , a: Tuple=2.6_5_9_2 , a: int=True , **a: int , ):
super().__init__(**a )
if text_config is None:
__lowerCamelCase : List[str] = {}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
__lowerCamelCase : str = {}
logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' )
__lowerCamelCase : List[Any] = OwlViTTextConfig(**a )
__lowerCamelCase : str = OwlViTVisionConfig(**a )
__lowerCamelCase : Union[str, Any] = projection_dim
__lowerCamelCase : Tuple = logit_scale_init_value
__lowerCamelCase : Dict = return_dict
__lowerCamelCase : Tuple = 1.0
@classmethod
def _snake_case ( cls: str , a: Union[str, os.PathLike] , **a: List[Any] ):
cls._set_token_in_kwargs(a )
__lowerCamelCase , __lowerCamelCase : List[Any] = cls.get_config_dict(a , **a )
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a , **a )
@classmethod
def _snake_case ( cls: Tuple , a: Dict , a: Dict , **a: str ):
__lowerCamelCase : List[str] = {}
__lowerCamelCase : List[str] = text_config
__lowerCamelCase : Optional[int] = vision_config
return cls.from_dict(a , **a )
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[Any] = copy.deepcopy(self.__dict__ )
__lowerCamelCase : List[Any] = self.text_config.to_dict()
__lowerCamelCase : List[str] = self.vision_config.to_dict()
__lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
class A_ ( __UpperCamelCase ):
'''simple docstring'''
@property
def _snake_case ( self: str ):
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def _snake_case ( self: Dict ):
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def _snake_case ( self: int ):
return 1e-4
def _snake_case ( self: Any , a: "ProcessorMixin" , a: int = -1 , a: int = -1 , a: Optional["TensorType"] = None , ):
__lowerCamelCase : List[str] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=a , seq_length=a , framework=a )
__lowerCamelCase : int = super().generate_dummy_inputs(
processor.image_processor , batch_size=a , framework=a )
return {**text_input_dict, **image_input_dict}
@property
def _snake_case ( self: int ):
return 14
| 194
| 0
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : int = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
lowercase : List[str] = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
lowercase : List[Any] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any]) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : Dict = set()
__UpperCamelCase : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
__UpperCamelCase : Tuple = char
__UpperCamelCase : Tuple = set(_lowerCamelCase)
return pairs
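# Example: for the word tuple ("l", "o", "w") the helper above returns the
# symbol pairs {("l", "o"), ("o", "w")}.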
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :Optional[Any] , a :Union[str, Any] , a :Dict , a :Any="<s>" , a :Optional[int]="</s>" , a :List[Any]="</s>" , a :Dict="<s>" , a :Tuple="<unk>" , a :Union[str, Any]="<pad>" , a :Tuple="<mask>" , **a :Any , ) -> str:
super().__init__(
bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
__UpperCamelCase : Union[str, Any] = vocab_file
__UpperCamelCase : List[Any] = merges_file
__UpperCamelCase : str = {}
__UpperCamelCase : Optional[Any] = 0
__UpperCamelCase : List[str] = 1
__UpperCamelCase : str = 2
__UpperCamelCase : Optional[int] = 3
self.add_from_file(a )
__UpperCamelCase : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(a , encoding="utf-8" ) as merges_handle:
__UpperCamelCase : List[str] = merges_handle.read().split("\n" )[:-1]
__UpperCamelCase : Tuple = [tuple(merge.split()[:-1] ) for merge in merges]
__UpperCamelCase : Tuple = dict(zip(a , range(len(a ) ) ) )
__UpperCamelCase : Union[str, Any] = {}
def _lowerCamelCase ( self :Any , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase : Union[str, Any] = [self.cls_token_id]
__UpperCamelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self :List[str] , a :List[int] , a :Optional[List[int]] = None , a :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self :List[Any] , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
__UpperCamelCase : Optional[int] = [self.sep_token_id]
__UpperCamelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCamelCase ( self :Optional[Any] ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self :List[str] ) -> int:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self :Optional[Any] , a :Optional[int] ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
__UpperCamelCase : List[Any] = tuple(a )
__UpperCamelCase : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
__UpperCamelCase : List[str] = get_pairs(a )
if not pairs:
return token
while True:
__UpperCamelCase : Tuple = min(a , key=lambda a : self.bpe_ranks.get(a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = bigram
__UpperCamelCase : Dict = []
__UpperCamelCase : Dict = 0
while i < len(a ):
try:
__UpperCamelCase : Tuple = word.index(a , a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCamelCase : List[str] = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCamelCase : Any = tuple(a )
__UpperCamelCase : Optional[int] = new_word
if len(a ) == 1:
break
else:
__UpperCamelCase : Tuple = get_pairs(a )
__UpperCamelCase : Optional[int] = "@@ ".join(a )
__UpperCamelCase : Any = word[:-4]
__UpperCamelCase : List[Any] = word
return word
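        # The merged pieces are joined with "@@ " and the trailing "</w>" end-of-word marker
        # is stripped, e.g. ["lo", "w</w>"] becomes "lo@@ w".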
def _lowerCamelCase ( self :List[str] , a :Tuple ) -> Union[str, Any]:
__UpperCamelCase : Any = []
__UpperCamelCase : Optional[Any] = re.findall(r"\S+\n?" , a )
for token in words:
split_tokens.extend(list(self.bpe(a ).split(" " ) ) )
return split_tokens
def _lowerCamelCase ( self :Any , a :str ) -> int:
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self :Optional[int] , a :Optional[int] ) -> Optional[int]:
return self.decoder.get(a , self.unk_token )
def _lowerCamelCase ( self :List[str] , a :Dict ) -> Dict:
__UpperCamelCase : Dict = " ".join(a ).replace("@@ " , "" ).strip()
return out_string
def _lowerCamelCase ( self :int , a :str , a :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase : Optional[int] = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : Union[str, Any] = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
if os.path.abspath(self.merges_file ) != os.path.abspath(a ):
copyfile(self.merges_file , a )
return out_vocab_file, out_merge_file
    def add_from_file(self, f) -> None:
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset')
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 232
|
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f'Enter key [2-{len(message) - 1}]: '))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key , message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key , message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f'Output:\n{text + "|"}')
def encrypt_message(key : int , message : str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key : int , message : str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset( src_lang="ro" , tgt_lang="en" , dataset="wmt16" , save_dir=None ) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("""run pip install datasets""" )
    pair = f'''{src_lang}-{tgt_lang}'''
    print(f'''Converting {dataset}-{pair}''' )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f'''{dataset}-{pair}'''
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f'''Splitting {split} with {ds[split].num_rows} records''' )
        # to save to val.source, val.target like summary datasets
        fn = """val""" if split == """validation""" else split
        src_path = save_dir.joinpath(f'''{fn}.source''' )
        tgt_path = save_dir.joinpath(f'''{fn}.target''' )
        src_fp = src_path.open("""w+""" )
        tgt_fp = tgt_path.open("""w+""" )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x["""translation"""]
            src_fp.write(ex[src_lang] + """\n""" )
            tgt_fp.write(ex[tgt_lang] + """\n""" )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MaskFormerFeatureExtractor"]
UpperCamelCase_ = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
    _import_structure["modeling_maskformer_swin"] = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls ) -> Optional[int]:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def UpperCamelCase_ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[str] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , repo_id="test-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : int = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def UpperCamelCase_ ( self ) -> str:
CustomConfig.register_for_auto_class()
_SCREAMING_SNAKE_CASE : List[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 4_2 )
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_SCREAMING_SNAKE_CASE : Dict = c.n_embd + 1 # int
_SCREAMING_SNAKE_CASE : int = c.resid_pdrop + 1.0 # float
_SCREAMING_SNAKE_CASE : Tuple = not c.scale_attn_weights # bool
_SCREAMING_SNAKE_CASE : Optional[Any] = c.summary_type + "foo" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(__lowerCamelCase , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(__lowerCamelCase , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(__lowerCamelCase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(__lowerCamelCase , c.summary_type , "mismatch for key: summary_type" )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[int] = PretrainedConfig()
_SCREAMING_SNAKE_CASE : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__lowerCamelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [key for key, value in config_common_kwargs.items() if value == getattr(__lowerCamelCase , __lowerCamelCase )]
if len(__lowerCamelCase ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {', '.join(__lowerCamelCase )}.""" )
def UpperCamelCase_ ( self ) -> str:
with self.assertRaises(__lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
_SCREAMING_SNAKE_CASE : Any = mock.Mock()
_SCREAMING_SNAKE_CASE : Optional[int] = 5_0_0
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : str = HTTPError
_SCREAMING_SNAKE_CASE : Optional[int] = {}
# Download this model to make sure it's in the cache.
_SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__lowerCamelCase ) as mock_head:
_SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
_SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained("bert-base-cased" )
_SCREAMING_SNAKE_CASE : Dict = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(__lowerCamelCase , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_SCREAMING_SNAKE_CASE : str = ["config.42.0.0.json"]
_SCREAMING_SNAKE_CASE : Any = 7_6_8
configuration.save_pretrained(__lowerCamelCase )
shutil.move(os.path.join(__lowerCamelCase , "config.4.0.0.json" ) , os.path.join(__lowerCamelCase , "config.42.0.0.json" ) )
_SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_SCREAMING_SNAKE_CASE : str = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
_SCREAMING_SNAKE_CASE : Optional[int] = "v4.0.0"
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
__lowerCamelCase , return_unused_kwargs=__lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__lowerCamelCase , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_SCREAMING_SNAKE_CASE : Optional[Any] = "v3.0.0"
_SCREAMING_SNAKE_CASE : Tuple = old_transformers.models.auto.AutoConfig.from_pretrained(__lowerCamelCase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
_SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
_SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
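        # Shape sketch (added comment): for query_input of shape (batch, q) and
        # key_input of shape (batch, k), the elementwise product above yields a
        # (batch, q, k) mask, and unsqueeze(-3) returns (batch, 1, q, k) so it
        # can broadcast across attention heads.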
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
_SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase )
# decoder: No padding present.
_SCREAMING_SNAKE_CASE : Any = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_SCREAMING_SNAKE_CASE : Optional[Any] = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
_SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase )
return spec_out
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_SCREAMING_SNAKE_CASE : Tuple = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
_SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
# pre_self_attention_layer_norm
_SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
_SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation()
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = self.wi_a(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear
_SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : str = eps
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
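# In short (added comment): the layer above computes
#     y = weight * x / sqrt(mean(x**2, dim=-1) + eps)
# with the statistics accumulated in float32 before casting back to the
# weight's dtype.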
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) ))
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 )
_SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift
return x
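# A standalone sketch of the FiLM rule used above (added; independent of the
# obfuscated class names in this file and assuming only torch): a conditioning
# embedding is projected to per-channel (scale, shift) pairs that modulate the
# features as x * (1 + scale) + shift.
if __name__ == "__main__":
    import torch
    from torch import nn

    d_model, cond_dim = 8, 32
    film = nn.Linear(cond_dim, d_model * 2, bias=False)
    x = torch.randn(2, 5, d_model)      # (batch, seq, channels)
    cond = torch.randn(2, 1, cond_dim)  # conditioning embedding
    scale, shift = torch.chunk(film(cond), 2, dim=-1)
    out = x * (1 + scale) + shift       # same rule as the FiLM layer's forward
    print(out.shape)                    # torch.Size([2, 5, 8])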
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_small_integration_test( self ):
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/s2t-wav2vec2-large-en-de''': (
        '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config( PretrainedConfig ):
    model_type = '''speech_to_text_2'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
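# Minimal usage sketch (added): the defaults reproduce the published
# s2t-wav2vec2 decoder dimensions.
if __name__ == "__main__":
    config = Speech2Text2Config()
    print(config.d_model , config.decoder_layers )  # 256 6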
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( __A ):
'''simple docstring'''
__lowerCAmelCase = ['image_processor', 'tokenizer']
__lowerCAmelCase = 'LayoutLMv2ImageProcessor'
__lowerCAmelCase = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowercase , )
__a : str = kwargs.pop('''feature_extractor''' )
__a : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowercase , __lowercase )
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
__a : List[Any] = self.image_processor(images=__lowercase , return_tensors=__lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__lowercase , __lowercase ):
__a : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a : Optional[int] = features['''words''']
__a : str = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , stride=__lowercase , pad_to_multiple_of=__lowercase , return_token_type_ids=__lowercase , return_attention_mask=__lowercase , return_overflowing_tokens=__lowercase , return_special_tokens_mask=__lowercase , return_offsets_mapping=__lowercase , return_length=__lowercase , verbose=__lowercase , return_tensors=__lowercase , **__lowercase , )
# add pixel values
__a : Dict = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a : List[Any] = self.get_overflowing_images(__lowercase , encoded_inputs['''overflow_to_sample_mapping'''] )
__a : Tuple = images
return encoded_inputs
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__lowercase ) != len(__lowercase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f""" {len(__lowercase )} and {len(__lowercase )}""" )
return images_with_overflow
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*__lowercase , **__lowercase )
@property
def _lowerCamelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowercase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowercase , )
return self.image_processor
"""simple docstring"""
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ",
    )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
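# Worked example (added comment): for the infix string "a+b*c",
# infix_2_postfix returns "abc*+" (b*c binds tighter than +), while
# infix_2_prefix reverses the string to "c*b+a", converts that to the postfix
# "cb*a+", and reverses again to yield the prefix form "+a*bc".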
if __name__ == "__main__":
A = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
A = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
def combination_sum_iv(n: int , array: list[int] , target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int , array: list[int] , target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array)
def combination_sum_iv_bottom_up(n: int , array: list[int] , target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
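# Added note: all three variants above count the same compositions; the plain
# recursion re-solves subproblems exponentially, the memoized version caches
# one answer per target value, and the bottom-up table runs in
# O(target * len(array)) time and O(target) space.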
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected ) == sorted(result )
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt , class_data_dir , num_class_images ) -> None:
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url='https://knn.laion.ai/knn-service' ,indice_name='laion_400m' ,num_images=num_images ,aesthetic_weight=0.1 )
    os.makedirs(f'''{class_data_dir}/images''' ,exist_ok=True )
    if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='https://knn.laion.ai/knn-service' ,indice_name='laion_400m' ,num_images=num_images ,aesthetic_weight=0.1 ,)
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images' ,total=num_class_images )
    with open(f'''{class_data_dir}/caption.txt''' ,'w' ) as fa, open(f'''{class_data_dir}/urls.txt''' ,'w' ) as fa_urls, open(
        f'''{class_data_dir}/images.txt''' ,'w' ) as fa_images:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f'''{class_data_dir}/images/{total}.jpg''' ,'wb' ) as f:
                        f.write(img.content )
                    fa.write(images['caption'] + '\n' )
                    fa_urls.write(images['url'] + '\n' )
                    fa_images.write(f'''{class_data_dir}/images/{total}.jpg''' + '\n' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser('' ,add_help=False )
    parser.add_argument('--class_prompt' ,help='text prompt to retrieve images' ,required=True ,type=str )
    parser.add_argument('--class_data_dir' ,help='path to save images' ,required=True ,type=str )
    parser.add_argument('--num_class_images' ,help='number of images to download' ,default=200 ,type=int )
    return parser.parse_args()
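# Example invocation (added; the script name and paths are illustrative):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_data --num_class_images 200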
if __name__ == "__main__":
A__ : str = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig( PretrainedConfig ):
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
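# A minimal usage sketch (added): constructing a config with an invalid
# `rope_scaling` factor should trip the validation above.
if __name__ == "__main__":
    config = LlamaConfig(rope_scaling={'type': 'linear', 'factor': 2.0} )  # valid
    try:
        LlamaConfig(rope_scaling={'type': 'linear', 'factor': 0.5} )  # invalid: factor must be > 1.0
    except ValueError as err:
        print(err )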
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : str = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Dict = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Tuple = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
blip_test = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> str:
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = {'''BertModelTest''': '''BertModelTester'''}
_a = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file)
        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
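# Hedged illustration of the idea behind get_test_to_tester_mapping (this is NOT
# the real utils/get_test_info.py logic): one simple way to pair a `FooModelTest`
# class with the `FooModelTester` it instantiates is to walk the test module's AST
# and record the first `*Tester(...)` call inside each `*Test` class.
import ast


def sketch_test_to_tester_mapping(source: str) -> dict:
    """Map each `*Test` class in `source` to the first `*Tester` it instantiates."""
    mapping = {}
    for node in ast.parse(source).body:
        if isinstance(node, ast.ClassDef) and node.name.endswith("Test"):
            for call in ast.walk(node):
                if (
                    isinstance(call, ast.Call)
                    and isinstance(call.func, ast.Name)
                    and call.func.id.endswith("Tester")
                ):
                    mapping[node.name] = call.func.id
                    break
    return mapping


_toy_module = """
class BertModelTester: pass
class BertModelTest:
    def setUp(self):
        self.model_tester = BertModelTester()
"""
assert sketch_test_to_tester_mapping(_toy_module) == {"BertModelTest": "BertModelTester"}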
| 320
| 1
|
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__snake_case ="""true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
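# Hedged sketch of the problem `gather_for_metrics` solves (the real accelerate
# implementation also handles dispatch/split batching): with N processes the
# sampler pads the dataset so every rank sees equal batches, so a plain all-gather
# returns duplicated tail samples. Truncating to the true dataset length is,
# roughly, what restores exact metrics, hence `len(logits) == num_samples` above.
def _sketch_gather_for_metrics(per_rank_preds, dataset_length):
    # `per_rank_preds` stands in for each rank's outputs after an all-gather.
    gathered = [pred for rank in per_rank_preds for pred in rank]
    return gathered[:dataset_length]


# 10 real samples on 3 "ranks": the last rank is padded by repeating samples 0-1.
assert _sketch_gather_for_metrics([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 0, 1]], 10) == list(range(10))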
| 352
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
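    # Hedged worked check of the basis function above: for the degree-2 curve,
    # comb(2, i) * (1 - t)**(2 - i) * t**i at t = 0.5 gives [0.25, 0.5, 0.25],
    # so the curve point is 0.25*(0, 0) + 0.5*(5, 5) + 0.25*(5, 0) = (3.75, 2.5).
    quadratic = BezierCurve([(0, 0), (5, 5), (5, 0)])
    assert [round(b, 2) for b in quadratic.basis_function(0.5)] == [0.25, 0.5, 0.25]
    assert quadratic.bezier_curve_function(0.5) == (3.75, 2.5)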
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_llama"""] = [
        """LlamaForCausalLM""",
        """LlamaModel""",
        """LlamaPreTrainedModel""",
        """LlamaForSequenceClassification""",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
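# Hedged sketch of the lazy-import pattern above (the real `_LazyModule` in
# transformers handles more bookkeeping, e.g. `dir()` and pickling): names are
# recorded at import time and the heavy submodule is only imported on first
# attribute access. `_LazySketch` is a hypothetical, simplified stand-in.
import importlib
import types


class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)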
| 325
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline( DiffusionPipeline ):
    _optional_components = ["vqvae"]
    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNetaDConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        """Returns the default number of steps recommended for inference."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Generate a mel spectrogram image (and the matching audio) from noise."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising step process: recover the noisy image that generates an image."""
        # Only works with a deterministic (DDIM) scheduler
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
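# Hedged numeric check of `slerp` above: between two orthogonal unit vectors at
# alpha = 0.5, theta = pi/2 and both coefficients equal sin(pi/4) / sin(pi/2),
# i.e. about 0.7071. (Running this directly requires the surrounding package
# context for the relative imports at the top of the file.)
if __name__ == "__main__":
    _x0 = torch.tensor([1.0, 0.0])
    _x1 = torch.tensor([0.0, 1.0])
    _mid = AudioDiffusionPipeline.slerp(_x0, _x1, 0.5)
    assert torch.allclose(_mid, torch.tensor([0.7071, 0.7071]), atol=1e-4)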
| 325
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
        '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FocalNetForImageClassification''',
        '''FocalNetForMaskedImageModeling''',
        '''FocalNetBackbone''',
        '''FocalNetModel''',
        '''FocalNetPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 368
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters), otherwise the quantization may not work as expected
# Therefore here we use only bloom-1b3 to test our module
a = "bigscience/bloom-1b7"
# Constant values
a = 2.109_6595_5269_2574
a = "Hello my name is"
a = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
a = 10
def A ( self : List[Any]):
# Models and tokenizer
_A : List[Any] = AutoTokenizer.from_pretrained(self.model_name)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def A ( self : List[str]):
super().setUp()
# Models and tokenizer
_A : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto')
_A : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
def A ( self : List[Any]):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def A ( self : str):
_A : Any = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , 'quantization_config'))
_A : str = config.to_dict()
_A : Dict = config.to_diff_dict()
_A : int = config.to_json_string()
def A ( self : List[str]):
from bitsandbytes.nn import Paramsabit
_A : List[str] = self.model_fpaa.get_memory_footprint()
_A : List[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
_A : str = get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def A ( self : List[Any]):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE , torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def A ( self : Any):
_A : List[str] = self.tokenizer(self.input_text , return_tensors='pt')
_A : Any = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) , self.EXPECTED_OUTPUTS)
def A ( self : List[str]):
_A : List[Any] = BitsAndBytesConfig()
_A : Optional[int] = True
_A : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE , device_map='auto')
_A : int = self.tokenizer(self.input_text , return_tensors='pt')
_A : Any = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) , self.EXPECTED_OUTPUTS)
def A ( self : Union[str, Any]):
with self.assertRaises(SCREAMING_SNAKE_CASE), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE)
def A ( self : int):
_A : Any = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE):
_A : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto' , bnb_abit_quant_type='nf4' , )
def A ( self : Optional[Any]):
with self.assertRaises(SCREAMING_SNAKE_CASE):
# Tries with `str`
self.model_abit.to('cpu')
with self.assertRaises(SCREAMING_SNAKE_CASE):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa)
with self.assertRaises(SCREAMING_SNAKE_CASE):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0'))
with self.assertRaises(SCREAMING_SNAKE_CASE):
            # Tries with a cast to float
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE):
            # Tries with a cast to half precision
self.model_abit.half()
# Test if we did not break anything
_A : str = self.tokenizer(self.input_text , return_tensors='pt')
_A : Optional[Any] = self.model_fpaa.to(torch.floataa)
_A : Optional[int] = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
# Check this does not throw an error
_A : Tuple = self.model_fpaa.to('cpu')
# Check this does not throw an error
_A : Optional[int] = self.model_fpaa.half()
# Check this does not throw an error
_A : int = self.model_fpaa.float()
def A ( self : Dict):
_A : Any = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def A ( cls : Union[str, Any]):
_A : Tuple = 't5-small'
_A : Union[str, Any] = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
_A : str = AutoTokenizer.from_pretrained(cls.model_name)
_A : Optional[int] = 'Translate in German: Hello, my dog is cute'
def A ( self : Any):
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[str]):
from transformers import TaForConditionalGeneration
_A : Union[str, Any] = TaForConditionalGeneration._keep_in_fpaa_modules
_A : List[str] = None
# test with `t5-small`
_A : Optional[int] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
_A : int = self.tokenizer(self.input_text , return_tensors='pt').to(0)
_A : List[str] = model.generate(**SCREAMING_SNAKE_CASE)
# test with `flan-t5-small`
_A : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
_A : Dict = self.tokenizer(self.input_text , return_tensors='pt').to(0)
_A : Any = model.generate(**SCREAMING_SNAKE_CASE)
_A : List[str] = modules
def A ( self : Dict):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
_A : str = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
_A : int = self.tokenizer(self.input_text , return_tensors='pt').to(0)
_A : Union[str, Any] = model.generate(**SCREAMING_SNAKE_CASE)
# test with `flan-t5-small`
_A : Union[str, Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
_A : str = self.tokenizer(self.input_text , return_tensors='pt').to(0)
_A : Optional[int] = model.generate(**SCREAMING_SNAKE_CASE)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def A ( self : Union[str, Any]):
super().setUp()
# model_name
_A : Dict = 'bigscience/bloom-560m'
_A : List[str] = 't5-small'
# Different types of model
_A : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
# Sequence classification model
_A : Tuple = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
# CausalLM model
_A : List[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
# Seq2seq model
_A : Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
def A ( self : str):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def A ( self : Any):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def A ( self : Optional[Any]):
super().setUp()
def A ( self : Any):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[int]):
_A : Dict = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
_A : Union[str, Any] = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def A ( self : List[Any]):
super().setUp()
def A ( self : Any):
_A : int = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='balanced')
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
# Check that inference pass works on the model
_A : Tuple = self.tokenizer(self.input_text , return_tensors='pt')
# Second real batch
_A : Union[str, Any] = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) , self.EXPECTED_OUTPUTS)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def A ( self : Optional[Any]):
_A : Any = 'facebook/opt-350m'
super().setUp()
def A ( self : List[Any]):
if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'):
return
# Step 1: freeze all parameters
_A : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE)
self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
for param in model.parameters():
_A : Optional[Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
_A : Tuple = param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE)):
_A : List[Any] = LoRALayer(module.q_proj , rank=16)
_A : Union[str, Any] = LoRALayer(module.k_proj , rank=16)
_A : str = LoRALayer(module.v_proj , rank=16)
# Step 3: dummy batch
_A : Union[str, Any] = self.tokenizer('Test batch ' , return_tensors='pt').to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
_A : Union[str, Any] = model.forward(**SCREAMING_SNAKE_CASE)
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(SCREAMING_SNAKE_CASE , nn.Embedding):
self.assertTrue(module.weight.grad is None)
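# Hedged standalone sketch of the adapter pattern exercised in Steps 1-4 above:
# freeze a plain base layer, bolt on a low-rank LoRA-style bypass, and check that
# gradients reach only the adapter. No quantization involved; `_lora_sketch` is a
# made-up helper, not part of the test suite.
def _lora_sketch():
    torch.manual_seed(0)
    base = nn.Linear(8, 8)
    base.weight.requires_grad_(False)  # Step 1: freeze the base parameters
    base.bias.requires_grad_(False)
    adapted = LoRALayer(base, rank=2)  # Step 2: add the adapter
    out = adapted(torch.randn(4, 8))  # Step 3: dummy batch
    out.norm().backward()  # Step 4: gradients flow only into the adapter
    assert base.weight.grad is None
    assert adapted.adapter[1].weight.grad is not None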
class __lowerCamelCase ( a_ ):
"""simple docstring"""
a = "gpt2-xl"
a = 3.3191_8548_5415_2187
| 227
| 0
|
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Counts the composite integers below max_number with exactly two,
    not necessarily distinct, prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
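# Hedged sanity check of the two-pointer count above: the ten semiprimes below 30
# are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26 (6 pairs with smaller factor 2,
# 3 with factor 3, 1 with factor 5), so solution(30) should return 10.
assert solution(30) == 10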
| 5
|
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
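# Hedged worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27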
| 188
| 0
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __a ( unittest.TestCase ):
    def test_cpu( self ):
        debug_launcher(test_script.main )
    def test_ops( self ):
        debug_launcher(test_ops.main )
| 80
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching , 'os.path.join' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , 'open' , mock ):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching , 'pandas.read_csv' , mock ):
        pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , 'len' , None ) is None
    with patch_submodule(_test_patching , 'len' , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching , 'open' , mock )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join
    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
        with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    # try another order
    with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
        with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , mock ):
        pass
    with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , mock ):
        pass
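# Hedged minimal sketch of the core idea behind `patch_submodule`: temporarily
# rebind an attribute and restore it on exit. The real datasets implementation
# additionally wraps parent modules in `_PatchedModuleObj` so dotted access such
# as `module.os.path.join` resolves to the mock; this simplified version only
# handles a flat attribute.
from contextlib import contextmanager


@contextmanager
def sketch_patch_attribute(obj, name, replacement):
    sentinel = object()
    original = getattr(obj, name, sentinel)
    setattr(obj, name, replacement)
    try:
        yield
    finally:
        if original is sentinel:
            delattr(obj, name)
        else:
            setattr(obj, name, original)


def test_sketch_patch_attribute():
    mock = "__sketch_mock__"
    with sketch_patch_attribute(_test_patching, "open", mock):
        assert _test_patching.open is mock
    assert _test_patching.open is open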
| 80
| 1
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(F"""{class_data_dir}/images""" , exist_ok=True )
    if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc='''downloading real regularization images''' , total=num_class_images )
    with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa_urls, open(
        F"""{class_data_dir}/images.txt""" , '''w''' ) as fa_images:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['''url'''] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
                        f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fa_urls.write(images['''url'''] + '''\n''' )
                    fa_images.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser('''''' , add_help=False )
    parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=True , type=str )
    parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=True , type=str )
    parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
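# Hedged usage sketch (flag names match the parser above; the prompt and paths
# are made-up examples, and the LAION index/URL are hard-coded in `retrieve`):
#
#   python retrieve.py \
#       --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog \
#       --num_class_images 200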
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 0
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _A ( unittest.TestCase ):
def lowercase__ ( self : List[str] , __magic_name__ : int , __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = jnp.ones((batch_size, length) ) / length
return scores
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = None
__snake_case : Tuple = 20
__snake_case : int = self._get_uniform_logits(batch_size=2 , length=__lowerCAmelCase )
# tweak scores to not be uniform anymore
__snake_case : str = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__snake_case : str = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__snake_case : List[str] = jax.nn.softmax(__lowerCAmelCase , axis=-1 )
__snake_case : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
__snake_case : Dict = jax.nn.softmax(temp_dist_warper_sharper(__lowerCAmelCase , scores.copy() , cur_len=__lowerCAmelCase ) , axis=-1 )
__snake_case : str = jax.nn.softmax(temp_dist_warper_smoother(__lowerCAmelCase , scores.copy() , cur_len=__lowerCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Tuple = None
__snake_case : Any = 10
__snake_case : Optional[int] = 2
# create ramp distribution
__snake_case : str = np.broadcast_to(np.arange(__lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
__snake_case : int = ramp_logits[1:, : vocab_size // 2] + vocab_size
__snake_case : Optional[int] = FlaxTopKLogitsWarper(3 )
__snake_case : Any = top_k_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__snake_case : Optional[int] = 5
__snake_case : Optional[int] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__snake_case : Tuple = np.broadcast_to(np.arange(__lowerCAmelCase )[None, :] , (batch_size, length) ).copy()
__snake_case : List[Any] = top_k_warp_safety_check(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = None
__snake_case : Optional[int] = 10
__snake_case : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__snake_case : Optional[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__snake_case : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
__snake_case : Any = np.exp(top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__snake_case : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__snake_case : List[Any] = np.broadcast_to(np.arange(__lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__snake_case : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__snake_case : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__snake_case : Any = top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = 20
__snake_case : Any = 4
__snake_case : Optional[Any] = 0
__snake_case : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowerCAmelCase )
# check that min length is applied at length 5
__snake_case : Optional[int] = ids_tensor((batch_size, 20) , vocab_size=20 )
__snake_case : Optional[int] = 5
__snake_case : Any = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
__snake_case : str = min_dist_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
__snake_case : Union[str, Any] = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
__snake_case : Any = 15
__snake_case : Dict = min_dist_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
self.assertFalse(jnp.isinf(__lowerCAmelCase ).any() )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = 20
__snake_case : Optional[int] = 4
__snake_case : Optional[int] = 0
__snake_case : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
__snake_case : Any = ids_tensor((batch_size, 1) , vocab_size=20 )
__snake_case : List[str] = 1
__snake_case : int = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
__snake_case : List[Any] = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__snake_case : int = 3
__snake_case : int = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
__snake_case : str = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
self.assertFalse(jnp.isinf(__lowerCAmelCase ).any() )
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
__snake_case : List[str] = 20
__snake_case : Optional[Any] = 4
__snake_case : Union[str, Any] = 0
__snake_case : List[str] = 5
__snake_case : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
__snake_case : Union[str, Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
__snake_case : Union[str, Any] = 4
__snake_case : int = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
__snake_case : int = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__snake_case : Tuple = 3
__snake_case : int = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
__snake_case : Tuple = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
self.assertFalse(jnp.isinf(__lowerCAmelCase ).any() )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
__snake_case : str = 4
__snake_case : List[Any] = 10
__snake_case : List[Any] = 15
__snake_case : List[Any] = 2
__snake_case : List[Any] = 1
__snake_case : List[Any] = 15
# dummy input_ids and scores
__snake_case : Optional[int] = ids_tensor((batch_size, sequence_length) , __lowerCAmelCase )
__snake_case : Tuple = input_ids.copy()
__snake_case : Optional[int] = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
__snake_case : int = scores.copy()
# instantiate all dist processors
__snake_case : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : Any = FlaxTopKLogitsWarper(3 )
__snake_case : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__snake_case : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowerCAmelCase )
__snake_case : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowerCAmelCase )
__snake_case : Optional[int] = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
__snake_case : int = 10
# no processor list
__snake_case : List[Any] = temp_dist_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
__snake_case : Dict = top_k_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
__snake_case : List[Any] = top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
__snake_case : List[Any] = min_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
__snake_case : Any = bos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
__snake_case : Dict = eos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
# with processor list
__snake_case : Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__snake_case : Union[str, Any] = processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowercase__ ( self : int ) -> Any:
"""simple docstring"""
__snake_case : Tuple = 4
__snake_case : Optional[int] = 10
__snake_case : Optional[int] = 15
__snake_case : str = 2
__snake_case : List[Any] = 1
__snake_case : Tuple = 15
# dummy input_ids and scores
__snake_case : int = ids_tensor((batch_size, sequence_length) , __lowerCAmelCase )
__snake_case : Any = input_ids.copy()
__snake_case : Union[str, Any] = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase )
__snake_case : Optional[Any] = scores.copy()
# instantiate all dist processors
__snake_case : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : List[str] = FlaxTopKLogitsWarper(3 )
__snake_case : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__snake_case : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowerCAmelCase )
__snake_case : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowerCAmelCase )
__snake_case : str = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
__snake_case : int = 10
# no processor list
def run_no_processor_list(__magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : List[str] ):
__snake_case : Dict = temp_dist_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
__snake_case : Tuple = top_k_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
__snake_case : List[str] = top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
__snake_case : Any = min_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
__snake_case : Any = bos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
__snake_case : Tuple = eos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
return scores
# with processor list
def run_processor_list(__magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str] ):
__snake_case : int = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__snake_case : List[Any] = processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase )
return scores
__snake_case : List[Any] = jax.jit(__lowerCAmelCase )
__snake_case : int = jax.jit(__lowerCAmelCase )
__snake_case : Tuple = jitted_run_no_processor_list(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__snake_case : int = jitted_run_processor_list(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
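# Hedged standalone sketch of the top-p (nucleus) filtering behaviour checked
# above, in plain numpy: keep the smallest set of highest-probability tokens
# whose mass reaches top_p and mask the rest with filter_value. `_sketch_top_p`
# is a simplified stand-in, not the Flax implementation.
def _sketch_top_p(probs, top_p, filter_value=0.0):
    order = np.argsort(probs)[::-1]  # token ids, highest probability first
    cumulative = np.cumsum(probs[order])
    # a token is kept while the probability mass *before* it is still below top_p
    keep = cumulative - probs[order] < top_p
    filtered = np.full_like(probs, filter_value)
    filtered[order[keep]] = probs[order[keep]]
    return filtered


# matches the expected filtered distribution used in the FlaxTopPLogitsWarper test
assert np.allclose(_sketch_top_p(np.array([0.3, 0.1, 0.1, 0.5]), 0.8), [0.3, 0.0, 0.0, 0.5])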
| 357
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 13
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
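# Illustrative usage sketch (not part of the original module; assumes the
# checkpoint referenced above is available on the Hugging Face Hub):
#
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   ids = tokenizer("Hello world", add_special_tokens=True)["input_ids"]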
| 279
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
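    # Illustrative check (added; not in the original file): trial division emits
    # prime factors in ascending order, e.g. 315 = 3 * 3 * 5 * 7.
    assert prime_factors(315) == [3, 3, 5, 7]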
| 55
| 0
|
# Each schedule below gets its own (placeholder) name; reusing a single name
# would silently overwrite every list but the last.
__UpperCAmelCase_0 = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__UpperCAmelCase_1 = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__UpperCAmelCase_2 = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__UpperCAmelCase_3 = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__UpperCAmelCase_4 = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__UpperCAmelCase_5 = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__UpperCAmelCase_6 = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__UpperCAmelCase_7 = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 358
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # The Flax block-sparse attention computes attention probabilities differently,
        # so attention tensors are skipped when comparing against PyTorch outputs.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 139
| 0
|
"""simple docstring"""
from collections import deque
def tarjan(g):
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
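# Note on the expected output above: Tarjan's algorithm emits each strongly
# connected component only after every component reachable from it, i.e. in
# reverse topological order of the condensation graph, which is why the sink
# vertex [5] comes first and the cycle [3, 2, 1, 0] comes last.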
| 106
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
            "image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # A single random channels-first image, converted to a PIL image.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ )
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=lowerCamelCase_ )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=lowerCamelCase_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = self.prepare_image_inputs()
a = image_processor(lowerCamelCase_ , return_tensors="np" )
a = processor(images=lowerCamelCase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=lowerCamelCase_ )
a = tokenizer(lowerCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(lowerCamelCase_ )
a = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
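# Minimal usage sketch mirroring the tests above (illustrative):
#   processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=images, return_tensors="np")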
| 227
| 0
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
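# Illustrative note (not from the original file): `forward` zeroes out any image
# whose NSFW or watermark score exceeds the corresponding threshold, so callers
# can loosen or tighten filtering via `p_threshold` / `w_threshold`, e.g.:
#   images, nsfw, watermark = safety_checker(clip_input, images, p_threshold=0.7)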
| 85
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
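# Usage sketch (illustrative; uses the upstream checkpoint referenced above):
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tokenizer("hello world")["input_ids"]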
| 85
| 1
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
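# Minimal sketch of the pattern exercised above (illustrative): patch_submodule
# temporarily rebinds every alias of an attribute inside a module's namespace.
#
#   with patch_submodule(_test_patching, "os.path.join", mock):
#       _test_patching.os.path.join(...)  # resolves to `mock` for the duration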
| 80
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase_ ( a__ ):
__UpperCAmelCase = ['pixel_values']
def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 2_55 , a = True , a = None , a = None , a = True , **a , ):
super().__init__(**a )
UpperCamelCase__ = size if size is not None else {"shortest_edge": 2_24}
UpperCamelCase__ = get_size_dict(a , default_to_square=a )
UpperCamelCase__ = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCamelCase__ = get_size_dict(a , default_to_square=a , param_name="crop_size" )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = resample
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase__ = do_convert_rgb
def __a ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ):
UpperCamelCase__ = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCamelCase__ = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def __a ( self , a , a , a = None , **a , ):
UpperCamelCase__ = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def __a ( self , a , a , a = None , **a , ):
return rescale(a , scale=a , data_format=a , **a )
def __a ( self , a , a , a , a = None , **a , ):
return normalize(a , mean=a , std=a , data_format=a , **a )
def __a ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ):
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(a , param_name="size" , default_to_square=a )
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(a , param_name="crop_size" , default_to_square=a )
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase__ = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase__ = [convert_to_rgb(a ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(a ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=a , mean=a , std=a ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(a , a ) for image in images]
UpperCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
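# Note on the pipeline implemented by `preprocess` above: images flow through
# convert_to_rgb -> to_numpy_array -> resize -> center_crop -> rescale ->
# normalize -> to_channel_dimension_format before being wrapped in a BatchFeature.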
| 80
| 1
|
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowerCAmelCase : List[Any] ={
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
lowerCAmelCase : int =logging.WARNING
def UpperCAmelCase_ ( ):
lowercase_ :int = os.getenv("DATASETS_VERBOSITY" ,SCREAMING_SNAKE_CASE_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option DATASETS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def UpperCAmelCase_ ( ):
return __name__.split("." )[0]
def UpperCAmelCase_ ( ):
return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ( ):
# Apply our default configuration to the library root logger.
lowercase_ :List[str] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCAmelCase_ ( ):
lowercase_ :str = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def UpperCAmelCase_ ( __lowerCamelCase : Optional[str] = None ):
if name is None:
lowercase_ :Optional[Any] = _get_library_name()
return logging.getLogger(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( ):
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ ( __lowerCamelCase : int ):
_get_library_root_logger().setLevel(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( ):
return set_verbosity(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( ):
return set_verbosity(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( ):
return set_verbosity(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( ):
return set_verbosity(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( ):
lowercase_ :int = False
def UpperCAmelCase_ ( ):
lowercase_ :int = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class a_ :
def __init__( self : Tuple , *lowercase : Optional[Any] , **lowercase : Tuple ): # pylint: disable=unused-argument
"""simple docstring"""
lowercase_ :List[str] = args[0] if args else None
def __iter__( self : Tuple ):
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self : Tuple , lowercase : int ):
"""simple docstring"""
def empty_fn(*lowercase : Optional[int] , **lowercase : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[Any] ):
"""simple docstring"""
return self
def __exit__( self : Dict , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : Union[str, Any] ):
"""simple docstring"""
return
lowerCAmelCase : str =True
class a_ :
def __call__( self : Any , *lowercase : Union[str, Any] , lowercase : Optional[int]=False , **lowercase : str ):
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*_A , **_A )
else:
return EmptyTqdm(*_A , **_A )
def lowercase__ ( self : Optional[Any] , *lowercase : List[Any] , **lowercase : int ):
"""simple docstring"""
lowercase_ :int = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_A , **_A )
def lowercase__ ( self : str ):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowerCAmelCase : Optional[Any] =_tqdm_cls()
def UpperCAmelCase_ ( ):
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase_ ( ):
global _tqdm_active
lowercase_ :Union[str, Any] = True
def UpperCAmelCase_ ( ):
global _tqdm_active
lowercase_ :List[str] = False
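# Illustrative usage, assuming the helpers keep their upstream `datasets.logging`
# names (set_verbosity_info, get_logger):
#   import datasets
#   datasets.logging.set_verbosity_info()
#   logger = datasets.logging.get_logger(__name__)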
| 364
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_wavlm'''] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
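# The _LazyModule assignment above defers the heavy torch-dependent imports until
# an attribute is first accessed, e.g. (illustrative):
#   from transformers.models.wavlm import WavLMModel  # triggers the lazy load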
| 147
| 0
|
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
lowercase_ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase_ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = F'''layers_{str(layer_index )}'''
# Self-Attention
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowercase_ = flax_model.params['''encoder''']['''block'''][str(snake_case__ )]['''layer''']
lowercase_ = tax_attention_key
lowercase_ = tax_attention_out
lowercase_ = tax_attention_query
lowercase_ = tax_attention_value
lowercase_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ = tax_global_layer_norm
if split_mlp_wi:
lowercase_ = tax_mlp_wi_a
lowercase_ = tax_mlp_wi_a
else:
lowercase_ = tax_mlp_wi
lowercase_ = tax_mlp_wo
lowercase_ = tax_mlp_layer_norm
lowercase_ = flax_model_encoder_layer_block
# Only for layer 0:
lowercase_ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
lowercase_ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
lowercase_ = tax_encoder_global_rel_embedding
# Assigning
lowercase_ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
lowercase_ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = F'''layers_{str(layer_index )}'''
# Self-Attention
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
lowercase_ = tax_enc_dec_attention_module['''key''']['''kernel''']
lowercase_ = tax_enc_dec_attention_module['''out''']['''kernel''']
lowercase_ = tax_enc_dec_attention_module['''query''']['''kernel''']
lowercase_ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowercase_ = flax_model.params['''decoder''']['''block'''][str(snake_case__ )]['''layer''']
lowercase_ = tax_attention_key
lowercase_ = tax_attention_out
lowercase_ = tax_attention_query
lowercase_ = tax_attention_value
lowercase_ = tax_pre_attention_layer_norm
lowercase_ = tax_enc_dec_attention_key
lowercase_ = tax_enc_dec_attention_out
lowercase_ = tax_enc_dec_attention_query
lowercase_ = tax_enc_dec_attention_value
lowercase_ = tax_cross_layer_norm
if split_mlp_wi:
lowercase_ = tax_mlp_wi_a
lowercase_ = tax_mlp_wi_a
else:
lowercase_ = tax_mlp_wi
lowercase_ = tax_mlp_wo
lowercase_ = txa_mlp_layer_norm
lowercase_ = flax_model_decoder_layer_block
# Decoder Normalization
lowercase_ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
lowercase_ = txa_decoder_norm
# Only for layer 0:
lowercase_ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
lowercase_ = tax_decoder_rel_embedding
# Token Embeddings
lowercase_ = tax_model['''target''']['''token_embedder''']['''embedding''']
lowercase_ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase_ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(flax_dump_folder_path )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
__a = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
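# Illustrative CLI invocation (script name and paths are placeholders):
#   python convert_t5x_checkpoint_to_flax.py --t5x_checkpoint_path /path/to/t5x_ckpt \
#       --config_name google/long-t5-local-base --flax_dump_folder_path ./flax_out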
| 30
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = XGLMTokenizer
_UpperCAmelCase : List[Any] = XGLMTokenizerFast
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = True
def _SCREAMING_SNAKE_CASE ( self : Tuple):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: List[Any] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = "<pad>"
SCREAMING_SNAKE_CASE_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(len(lowerCAmelCase__) , 1008)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any):
return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
def _SCREAMING_SNAKE_CASE ( self : str):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase__ , f.name)
SCREAMING_SNAKE_CASE_: Tuple = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = pickle.dumps(lowerCAmelCase__)
pickle.loads(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Any = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: str = tokenizer.encode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="facebook/xglm-564M" , padding=False , )
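# --- Added illustration (not part of the original test file) ---
# The assertions above add `tokenizer.fairseq_offset` to raw SentencePiece ids.
# A minimal sketch of that mapping, assuming a fairseq-style offset of 1 that
# reserves low ids for control tokens; the helper name is hypothetical:
def sp_id_to_model_id(sp_id: int, fairseq_offset: int = 1) -> int:
    # model id = SentencePiece id shifted past the reserved special tokens
    return sp_id + fairseq_offset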
| 13
| 0
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 89
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx)-1}')
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f'layer_norm{idx}', f'layer_norm.{int(idx)-1}')
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f'block{idx}', f'block.{int(idx)-1}')
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f'linear_c{idx}', f'linear_c.{int(idx)-1}')
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
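# --- Added illustration (not part of the original script) ---
# A quick sanity check of the rename rules above on a toy state dict; the
# expected key follows from the `backbone` and `patch_embed` rules:
#   rename_keys({"backbone.patch_embed1.proj.weight": torch.zeros(1)})
#   -> {"segformer.encoder.patch_embeddings.0.proj.weight": tensor([0.])}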
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight')
            kv_bias = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias')
            # next, add keys and values (in that order) to the state dict
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[
                config.hidden_sizes[i] :
            ]
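# --- Added illustration (not part of the original script) ---
# The split above assumes the checkpoint stacks key and value projections into
# one (2*hidden, hidden) matrix: the first half becomes the key projection and
# the second half the value projection, e.g. with hidden size 2:
#   kv = torch.arange(8.0).reshape(4, 2)
#   key_proj, value_proj = kv[:2, :], kv[2:, :]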
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f'Model {model_name} not supported')
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f'Model {model_name} not supported')
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase_ : Any = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : int = 2_56
elif size == "b2":
lowerCAmelCase_ : Any = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : List[str] = 7_68
lowerCAmelCase_ : Any = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase_ : List[str] = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : Union[str, Any] = 7_68
lowerCAmelCase_ : Union[str, Any] = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase_ : Tuple = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : Tuple = 7_68
lowerCAmelCase_ : Tuple = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase_ : Union[str, Any] = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : str = 7_68
lowerCAmelCase_ : Any = [3, 6, 40, 3]
else:
raise ValueError(f'Size {size} not supported' )
# load image processor (only resize + normalize)
lowerCAmelCase_ : List[Any] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=A__ , align=A__ , do_random_crop=A__ )
# prepare image
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Union[str, Any] = image_processor(images=A__ , return_tensors="""pt""" ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu"))["state_dict"]
    # rename keys
    state_dict = rename_keys(state_dict , encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config)
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.13_72E01, -1.27_87E01, -1.34_77E01],
[-1.25_36E01, -1.41_94E01, -1.44_09E01],
[-1.32_17E01, -1.48_88E01, -1.53_27E01],
],
[
[-1.47_91E01, -1.71_22E01, -1.82_77E01],
[-1.71_63E01, -1.91_92E01, -1.95_33E01],
[-1.78_97E01, -1.99_91E01, -2.03_15E01],
],
[
[7.67_23E-01, 4.19_21E-01, -7.78_78E-02],
[4.77_72E-01, 9.55_57E-03, -2.80_82E-01],
[3.60_32E-01, -2.48_26E-01, -5.11_68E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:" , model.config.id2label[predicted_class_idx])
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-2 )
# finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
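# --- Added usage sketch (the script filename and paths are hypothetical) ---
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-finetuned-ade-512-512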
| 89
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=False , vocab_size=19 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
        return config
    def create_and_check_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''')
def UpperCAmelCase__ ( self : Any):
pass
@unittest.skip
def UpperCAmelCase__ ( self : Dict):
pass
@unittest.skip('''Esm does not support embedding resizing''')
def UpperCAmelCase__ ( self : List[Any]):
pass
@unittest.skip('''Esm does not support embedding resizing''')
def UpperCAmelCase__ ( self : Any):
pass
@unittest.skip('''ESMFold does not support passing input embeds!''')
def UpperCAmelCase__ ( self : int):
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCAmelCase__ ( self : Dict):
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCAmelCase__ ( self : Optional[Any]):
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCAmelCase__ ( self : List[Any]):
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCAmelCase__ ( self : str):
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCAmelCase__ ( self : int):
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''')
def UpperCAmelCase__ ( self : List[Any]):
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''')
def UpperCAmelCase__ ( self : int):
pass
@unittest.skip('''ESMFold only has one output format.''')
def UpperCAmelCase__ ( self : Optional[int]):
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''')
def UpperCAmelCase__ ( self : str):
pass
@unittest.skip('''ESMFold does not support input chunking.''')
def UpperCAmelCase__ ( self : List[str]):
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''')
def UpperCAmelCase__ ( self : Union[str, Any]):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def UpperCAmelCase__ ( self : List[str]):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def UpperCAmelCase__ ( self : Union[str, Any]):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def UpperCAmelCase__ ( self : str):
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''')
def UpperCAmelCase__ ( self : List[Any]):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def UpperCAmelCase__ ( self : List[Any]):
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4))
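# --- Added note (not part of the original test file) ---
# Reading the shape checks above: `positions` is (blocks, batch, seq_len, 14, 3),
# i.e. 3D coordinates in an atom14 representation of each residue, and
# `angles` is (blocks, batch, seq_len, 7, 2), i.e. (sin, cos) pairs for seven
# torsion angles per residue. This interpretation follows AlphaFold-style
# structure modules and is inferred here, not asserted by the test itself.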
| 103
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            F'''{test_file} instead.''' )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''')
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''')
    components = components[:-1] + [test_fn.replace(".py" , "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module , attr))
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__)
def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module , attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value , "all_model_classes" , [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes , key=lambda x: x.__name__)
def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes , key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test , "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test , "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file , model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes , key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file , model_class):
    test_classes = get_test_classes_for_model(test_file , model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    if isinstance(o , str):
        return o
    elif isinstance(o , type):
        return o.__name__
    elif isinstance(o , (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o , dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
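# --- Added usage sketch (the test file path is hypothetical) ---
#   model_to_tester = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#   print(to_json(model_to_tester))  # class objects serialized to their names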
| 139
| 0
|
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration( AccelerateTestCase ):
    def setUp(self):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f'''{i + 1}'''
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1) )
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1) )
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED )
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception ) as cm:
                fsdp_plugin.set_auto_wrap_policy(model )
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype , reduce_dtype=dtype , buffer_dtype=dtype )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , mp_policy )
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler , ShardedGradScaler ) )
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler )
                AcceleratorState._reset_state(True )
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag ).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=flag ) )
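# --- Added note (not part of the original test file) ---
# Every test above follows one pattern: copy the fake single-process
# distributed env, override one FSDP_* variable, and check that
# FullyShardedDataParallelPlugin picks the value up at construction time.
# The exact FSDP_* key names are reconstructed from accelerate's conventions
# and should be treated as assumptions.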
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest( TempDirTestCase ):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_performance.py" )
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
                if strategy.lower() in config:
                    cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
                    break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            cmd_config = cmd.copy()
            cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"--partial_train_epoch=1",
] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir , "epoch_0" )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
| 286
|
from __future__ import annotations
def simple_interest(principal: float , daily_interest_rate: float , days_between_payments: float ):
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0" )
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float , nominal_annual_interest_rate_percentage: float , number_of_compounding_periods: float , ):
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest(principal: float , nominal_annual_percentage_rate: float , number_of_years: float , ):
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0" )
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
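# --- Added usage sketch (values are illustrative) ---
#   simple_interest(principal=10_000, daily_interest_rate=0.0001, days_between_payments=30)
#   -> 30.0, since interest accrues linearly: 10_000 * 0.0001 * 30
#   compound_interest(10_000, 0.05, 3) -> 10_000 * (1.05 ** 3 - 1) ≈ 1576.25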
| 286
| 1
|
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class LinearAlgebraTestCase( unittest.TestCase ):
    def test_component(self) -> None:
        x = Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
        y = Vector()
    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(x ) , "(0,0,0,0,0,1)" )
    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4] )
        self.assertEqual(len(x ) , 4 )
    def test_euclidean_length(self) -> None:
        x = Vector([1, 2] )
        y = Vector([1, 2, 3, 4, 5] )
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
        self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
        self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
        self.assertEqual(z.euclidean_length() , 0 )
        self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
    def test_add(self) -> None:
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
        self.assertEqual((x + y).component(0 ) , 2 )
        self.assertEqual((x + y).component(1 ) , 3 )
        self.assertEqual((x + y).component(2 ) , 4 )
    def test_sub(self) -> None:
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
        self.assertEqual((x - y).component(0 ) , 0 )
        self.assertEqual((x - y).component(1 ) , 1 )
        self.assertEqual((x - y).component(2 ) , 2 )
    def test_mul(self) -> None:
        x = Vector([1, 2, 3] )
        a = Vector([2, -1, 4] )  # for test of dot product
        b = Vector([1, -2, -1] )
        self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
        self.assertEqual((a * b) , 0 )
    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
    def test_axpy(self) -> None:
        x = Vector([1, 2, 3] )
        y = Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , x , y ) ) , "(3,4,7)" )
    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0] )
        y = x.copy()
        self.assertEqual(str(x ) , str(y ) )
    def test_change_component(self) -> None:
        x = Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(x ) , "(0,1,0)" )
    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(a ) )
    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(x , y ) )
    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(x , y ) )
    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(-5 , a.determinant() )
    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        x = Vector([1, 2, 3] )
        self.assertEqual("(14,32,50)" , str(a * x ) )
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(a ) )
    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
    unittest.main()
| 85
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12
    @property
    def num_embeds_ada_norm(self):
        return 12
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(config )
    @property
    def dummy_transformer(self):
        torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs )
        return model
    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="np" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="np" , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="np" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="np" , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 85
| 1
|
def sum_of_series(first_term: int , common_diff: int , num_of_terms: int ) -> float:
    """
    Find the sum of n terms in an arithmetic progression.
    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main() -> None:
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
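# --- Added derivation note (not part of the original file) ---
# The closed form used above pairs the k-th and (n+1-k)-th terms, each pair
# summing to 2*a1 + (n-1)*d, with n/2 pairs in total:
#   S_n = n/2 * (2*a1 + (n-1)*d)
# e.g. 1 + 2 + ... + 10 = 10/2 * (2 + 9) = 55.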
| 269
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream ):
    def __init__(
        self ,
        generator: Callable ,
        features: Optional[Features] = None ,
        cache_dir: str = None ,
        keep_in_memory: bool = False ,
        streaming: bool = False ,
        gen_kwargs: Optional[dict] = None ,
        num_proc: Optional[int] = None ,
        **kwargs ,
    ):
        super().__init__(
            features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.builder = Generator(
            cache_dir=cache_dir , features=features , generator=generator , gen_kwargs=gen_kwargs , **kwargs , )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
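# --- Added usage sketch (not part of the original module) ---
# In the real `datasets` library this stream backs `Dataset.from_generator`;
# a direct, minimal use of the class as fixed above:
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#   ds = GeneratorDatasetInputStream(generator=gen).read()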
| 269
| 1
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node( Generic[KT, VT] ):
    def __init__( self , key: KT | str = "root" , value: VT | None = None) -> None:
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []
    def __repr__( self) -> str:
        return F'Node({self.key}: {self.value})'
    @property
    def level( self) -> int:
        return len(self.forward)
class SkipList( Generic[KT, VT] ):
    def __init__( self , p: float = 0.5 , max_level: int = 16) -> None:
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__( self) -> str:
        items = list(self)
        if len(items) == 0:
            return F'SkipList(level={self.level})'
        label_size = max((len(str(item)) for item in items) , default=4)
        label_size = max(label_size , 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'[{node.key}]'.ljust(label_size , '-') + '* ' * len(forwards))
        lines.append(' ' * label_size + '| ' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                F'[{node.key}]'.ljust(label_size , '-')
                + ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards))
            lines.append(' ' * label_size + '| ' * len(forwards))
            forwards = node.forward
        lines.append('None'.ljust(label_size) + '* ' * len(forwards))
        return F'SkipList(level={self.level})\n' + "\n".join(lines)
def __iter__( self) -> Optional[int]:
'''simple docstring'''
a__ : Dict = self.head
while len(node.forward) != 0:
yield node.forward[0].key
a__ : Tuple = node.forward[0]
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : int = 1
while random() < self.p and level < self.max_level:
level += 1
return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """
        :param key: Searched key,
        :return: Tuple with searched node (or None if given key is not present)
                 and list of nodes that refer (if key is present) or should
                 refer to given node.
        """
        # Nodes which refer or should refer to the output node.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT) -> None:
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key , value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
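

# Usage sketch (illustrative, not part of the original file): keys come back
# in sorted order, and lookups are expected O(log n) thanks to the randomly
# assigned node levels.
#
# sl: SkipList[int, str] = SkipList(p=0.5, max_level=16)
# for k, v in [(3, "three"), (1, "one"), (2, "two")]:
#     sl.insert(k, v)
# assert list(sl) == [1, 2, 3] and sl.find(2) == "two"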
def test_insert():
    skip_list = SkipList()
    skip_list.insert('Key1' , 3 )
    skip_list.insert('Key2' , 12 )
    skip_list.insert('Key3' , 41 )
    skip_list.insert('Key4' , -19 )

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values ) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert('Key1' , 10 )
    skip_list.insert('Key1' , 12 )

    skip_list.insert('Key5' , 7 )
    skip_list.insert('Key7' , 10 )
    skip_list.insert('Key10' , 5 )

    skip_list.insert('Key7' , 7 )
    skip_list.insert('Key5' , 5 )
    skip_list.insert('Key10' , 10 )

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values ) != 4:
        print()

    assert len(all_values ) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find('Some key' ) is None


def test_search():
    skip_list = SkipList()

    skip_list.insert('Key2' , 20 )
    assert skip_list.find('Key2' ) == 20

    skip_list.insert('Some Key' , 10 )
    skip_list.insert('Key2' , 8 )
    skip_list.insert('V' , 13 )

    assert skip_list.find('Y' ) is None
    assert skip_list.find('Key2' ) == 8
    assert skip_list.find('Some Key' ) == 10
    assert skip_list.find('V' ) == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete('Some key' )

    assert len(skip_list.head.forward ) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert('Key1' , 12 )
    skip_list.insert('V' , 13 )
    skip_list.insert('X' , 14 )
    skip_list.insert('Key2' , 15 )

    skip_list.delete('V' )
    skip_list.delete('Key2' )

    assert skip_list.find('V' ) is None
    assert skip_list.find('Key2' ) is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert('Key1' , 12 )
    skip_list.insert('V' , 13 )
    skip_list.insert('X' , 14 )
    skip_list.insert('Key2' , 15 )

    skip_list.delete('V' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) == 14
    assert skip_list.find('Key1' ) == 12
    assert skip_list.find('Key2' ) == 15

    skip_list.delete('X' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) is None
    assert skip_list.find('Key1' ) == 12
    assert skip_list.find('Key2' ) == 15

    skip_list.delete('Key1' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) is None
    assert skip_list.find('Key1' ) is None
    assert skip_list.find('Key2' ) == 15

    skip_list.delete('Key2' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) is None
    assert skip_list.find('Key1' ) is None
    assert skip_list.find('Key2' ) is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert('Key1' , 12 )
    skip_list.insert('V' , 13 )
    skip_list.insert('X' , 142 )
    skip_list.insert('Key2' , 15 )

    skip_list.delete('X' )

    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )

    assert len(set(traverse_keys(skip_list.head ) ) ) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )

    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )


def pytests():
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """

    skip_list = SkipList()
    skip_list.insert(2 , '2' )
    skip_list.insert(4 , '4' )
    skip_list.insert(6 , '4' )
    skip_list.insert(4 , '5' )
    skip_list.insert(8 , '4' )
    skip_list.insert(9 , '4' )

    skip_list.delete(4 )

    print(skip_list )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected" , [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i , i + 1 ) for i in range(10 )]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
    ] , )
def test_distribute_shards(kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected" , [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ] , )
def test_split_gen_kwargs(gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected" , [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
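

# Illustrative sketch (not the library implementation): `_distribute_shards` is
# expected to split shard indices into contiguous ranges, with the first
# `num_shards % num_jobs` jobs receiving one extra shard each. The function
# below only mirrors the behavior the parametrized expectations above encode.
def _distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    num_jobs = min(num_shards, max_num_jobs)
    out, start = [], 0
    for job in range(num_jobs):
        size = num_shards // num_jobs + (1 if job < num_shards % num_jobs else 0)
        out.append(range(start, start + size))
        start += size
    return out


def test_distribute_shards_sketch_matches_expectations():
    assert _distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
    assert _distribute_shards_sketch(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]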
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(x ) , "(0,0,0,0,0,1)" )

    def test_size(self):
        x = Vector([1, 2, 3, 4] )
        self.assertEqual(len(x ) , 4 )

    def test_euclidean_length(self):
        x = Vector([1, 2] )
        y = Vector([1, 2, 3, 4, 5] )
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
        self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
        self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
        self.assertEqual(z.euclidean_length() , 0 )
        self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )

    def test_add(self):
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
        self.assertEqual((x + y).component(0 ) , 2 )
        self.assertEqual((x + y).component(1 ) , 3 )
        self.assertEqual((x + y).component(2 ) , 4 )

    def test_sub(self):
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
        self.assertEqual((x - y).component(0 ) , 0 )
        self.assertEqual((x - y).component(1 ) , 1 )
        self.assertEqual((x - y).component(2 ) , 2 )

    def test_mul(self):
        x = Vector([1, 2, 3] )
        a = Vector([2, -1, 4] )  # for test of dot product
        b = Vector([1, -2, -1] )
        self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
        self.assertEqual((a * b) , 0 )

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )

    def test_axpy(self):
        x = Vector([1, 2, 3] )
        y = Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , x , y ) ) , "(3,4,7)" )

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0] )
        y = x.copy()
        self.assertEqual(str(x ) , str(y ) )

    def test_change_component(self):
        x = Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(x ) , "(0,1,0)" )

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(a ) )

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(x , y ) )

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(x , y ) )

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(-5 , a.determinant() )

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        x = Vector([1, 2, 3] )
        self.assertEqual("(14,32,50)" , str(a * x ) )
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(a ) )

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(7 , a.component(2 , 1 ) )

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )


if __name__ == "__main__":
    unittest.main()
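
# Note (illustrative, not part of the original file): the minors/cofactors
# tables in test_minor and test_cofactor are related by
# cofactor[x][y] == (-1) ** (x + y) * minor[x][y],
# e.g. the minor -14 at position (0, 1) becomes the cofactor +14.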
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config )

        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )

        outputs = model(input_ids )

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config )

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )

        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )

        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )

        outputs = model(input_ids , attention_mask=attention_mask )

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self )

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
        inputs = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=True , truncation=True )

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate )

        output_sequences = jit_generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences

        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string , expected_string )
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name )

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(rnd_start_indices ):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config ).eval()
                fx_model = model_class(config , dtype=jnp.float32 )

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs ).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname )
                    fx_model_loaded = model_class.from_pretrained(tmpdirname , from_pt=True )

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded , pt_outputs ):
                    self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name )

                pt_model = pt_model_class(config ).eval()
                fx_model = model_class(config , dtype=jnp.float32 )

                pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(rnd_start_indices ):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs ).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname )
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname , from_flax=True )

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()

                self.assertEqual(
                    len(fx_outputs ) , len(pt_outputs_loaded ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs_loaded ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
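# Example invocation (hypothetical paths, for illustration only):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin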
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname ,legacy_format=False )
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] ,'<s>' )
        self.assertEqual(vocab_keys[1] ,'<pad>' )
        self.assertEqual(vocab_keys[-1] ,'<mask>' )
        self.assertEqual(len(vocab_keys ) ,101122 )

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size ,101122 )

    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text ,max_length=len(expected_src_tokens ) ,padding=True ,truncation=True ,return_tensors='pt' )
        self.assertIsInstance(batch ,BatchEncoding )

        self.assertEqual((2, 6) ,batch.input_ids.shape )
        self.assertEqual((2, 6) ,batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens ,result )

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens ,rust_tokens )

        ids = tokenizer.encode(sequence ,add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(ids ,rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids ,rust_ids )

    @slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=sequences ,)
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = 'altclip_text_model'

    def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, project_dim=768, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = 'altclip_vision_model'

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ):
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type' ) == "altclip":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(config_dict, **kwargs )
class AltCLIPConfig(PretrainedConfig):
    model_type = 'altclip'
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop('text_config_dict', None )
        vision_config_dict = kwargs.pop('vision_config_dict', None )

        super().__init__(**kwargs )

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message )

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict['id2label'] = {
                    str(key ): value for key, value in _vision_config_dict['id2label'].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message )

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )

        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )

        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )

        self.text_config = AltCLIPTextConfig(**text_config )
        self.vision_config = AltCLIPVisionConfig(**vision_config )

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
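

# Usage sketch (illustrative, not part of the original file): composing the
# joint config from its two sub-configs via `from_text_vision_configs`.
#
# text_config = AltCLIPTextConfig()
# vision_config = AltCLIPVisionConfig()
# config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
# assert config.text_config.vocab_size == 250002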
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': list of possible texts for the answer, as a list of strings\n            depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the CUAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\n    \'aupr\': Area Under the Precision-Recall curve\n    \'prec_at_80_recall\': Precision at 80% recall\n    \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n    >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> cuad_metric = datasets.load_metric("cuad")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric ):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': {
                        'id': datasets.Value('string' ),
                        'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
                    },
                    'references': {
                        'id': datasets.Value('string' ),
                        'answers': datasets.features.Sequence(
                            {
                                'text': datasets.Value('string' ),
                                'answer_start': datasets.Value('int32' ),
                            } ),
                    },
                } ), codebase_urls=['https://www.atticusprojectai.org/cuad'], reference_urls=['https://www.atticusprojectai.org/cuad'], )

    def _compute(self, predictions, references ):
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict )
        return score
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs, ):
        super().__init__(**kwargs )

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
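

# Usage sketch (illustrative, not part of the original file):
#
# config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
# assert config.num_frames == 16 and config.model_type == "timesformer"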
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict ):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )


def rename_key(dct, old, new ):
    val = dct.pop(old )
    dct[new] = val


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False ):
    """Copy/paste/tweak the timm model's weights to our ViT hybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )

    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )

    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms

    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )

    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='pt' ).pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits

    print('Predicted class:' , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print('Looks ok!' )

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""" )
        model.push_to_hub(f"""ybelkada/{vit_name}""" )
        processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 286
| 1
|
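# --- Illustration (not part of the conversion script above) ---
# A minimal sketch of the fused-qkv splitting idea used in read_in_q_k_v:
# timm stores query/key/value as one (3*hidden, hidden) matrix, which the
# script slices into three (hidden, hidden) blocks. All tensor names below
# are invented for the demo; only the slicing pattern mirrors the script.
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]                 # first third
key = in_proj_weight[hidden_size : hidden_size * 2, :]  # middle third
value = in_proj_weight[-hidden_size:, :]                # last third

# stacking the three slices back together recovers the fused matrix
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)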
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of 'prize strings' possible over the given number of days."""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
| 238
|
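# --- Illustration (assumes _calculate/solution from the file above) ---
# Sanity check for the memoised recursion: brute-force every attendance
# string over {O, L, A} for a small day count. A string wins a prize when it
# has at most one absence and never three consecutive lates; Project Euler
# 191 states there are 43 such strings for a 4-day period.
from itertools import product


def brute_force(days: int) -> int:
    count = 0
    for letters in product("OLA", repeat=days):
        string = "".join(letters)
        if string.count("A") < 2 and "LLL" not in string:
            count += 1
    return count


assert brute_force(4) == 43  # should agree with solution(4) above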
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Pipeline that predicts a depth map for an input image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Predict the depth of the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
| 238
| 1
|
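# --- Usage sketch (illustration, not part of the pipeline file above) ---
# A hedged example of how a depth-estimation pipeline like this is usually
# driven through transformers.pipeline; the checkpoint name is an assumption
# picked for illustration, not something the class above mandates.
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")       # PIL image, scaled to 0-255
#   print(result["predicted_depth"].shape)  # raw depth tensor from the model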
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights to our DETR structure."""
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 269
|
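# --- Illustration (not part of the DETR script above) ---
# Why the conversion iterates over state_dict.copy().keys(): popping from a
# dict while iterating over its live key view raises RuntimeError. The toy
# dict and key names below are invented for the demo.
state = {"backbone.w": 1, "head.w": 2}
for key in state.copy().keys():  # snapshot of the keys, so mutating `state` is safe
    if not key.startswith("head"):
        val = state.pop(key)
        state["model." + key] = val
assert state == {"head.w": 2, "model.backbone.w": 1}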
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 269
| 1
|
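# --- Illustration (not part of the export module above) ---
# A minimal sketch of the lazy-import idea behind _LazyModule, using the
# module-level __getattr__ hook from PEP 562 instead of the real class. The
# attribute-to-module map below is a stand-in for _import_structure.
import importlib

_lazy_map = {"sqrt": "math", "dumps": "json"}


def __getattr__(name):  # called only for attributes not found the normal way
    if name in _lazy_map:
        module = importlib.import_module(_lazy_map[name])  # imported on first access
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")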
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 349
|
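# --- Illustration (not part of run_eval above) ---
# run_eval imports a `chunks` helper from its local utils module. A minimal
# equivalent, under the assumption that it yields successive fixed-size
# slices of a list (the name and signature are inferred, not verified):
def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]


assert list(chunks(list(range(5)), 2)) == [[0, 1], [2, 3], [4]]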
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the imported script sees the right arguments
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 349
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 297
|
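# --- Usage sketch (illustration, not part of the processor file above) ---
# Hedged example of driving such a processor on an image plus a text prompt;
# the checkpoint name is an assumption picked for illustration.
#
#   from PIL import Image
#   import requests
#   from transformers import BlipProcessor
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   print(inputs.keys())  # pixel_values plus the tokenizer fields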
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order, using binary search
    to find the insertion point for each element."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 28
| 0
|
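# --- Illustration (assumes binary_insertion_sort from the file above) ---
# Property check: the binary insertion sort must agree with Python's built-in
# sorted() on arbitrary input.
import random

data = [random.randint(-100, 100) for _ in range(50)]
assert binary_insertion_sort(list(data)) == sorted(data)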
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Check that a device_map assigns every attention block to exactly one device."""
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
            ''' These attention blocks were specified more than once: ''' + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            '''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
            '''blocks to a device on the device_map: ''' + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            '''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
            + str(extra_blocks))


def get_device_map(n_layers, devices):
    """Split n_layers as evenly as possible across the given devices; returns {device: [layer indices]}."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 93
|
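# --- Illustration (assumes the two functions from the file above) ---
# get_device_map splits layers into ceil(n_layers / n_devices)-sized chunks,
# and assert_device_map then validates the result for duplicates and gaps.
device_map = get_device_map(n_layers=7, devices=[0, 1, 2])
# ceil(7 / 3) = 3 layers per device, with the last device taking the remainder
assert device_map == {0: [0, 1, 2], 1: [3, 4, 5], 2: [6]}
assert_device_map(device_map, num_blocks=7)  # passes: no duplicates, gaps, or extras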
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
    'TextEncoderConfig',
    'ByteTextEncoder',
    'SubwordTextEncoder',
    'encoder_config',
    'maybe_build_from_corpus',
    'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args):
    """Factory function used to build the ConvertCommand from parsed CLI args."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the datasets-cli."""
        train_parser = parser.add_parser(
            '''convert''', help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''')
        train_parser.add_argument(
            '''--tfds_path''', type=str, required=True, help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''')
        train_parser.add_argument(
            '''--datasets_directory''', type=str, required=True, help='''Path to the HuggingFace Datasets folder.''')
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger('''datasets-cli/converting''')
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''')
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}')
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f'Looking at file {f_name}')
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''')
                continue
            with open(input_file, encoding='''utf-8''') as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''', '''get_logger''')
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + '''\n''')
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''', out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(''','''))
                    out_line = '''from . import ''' + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'Error converting {out_line.strip()}')
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('''.py''', '''''')
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f'Adding directory {output_dir}')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, '''w''', encoding='''utf-8''') as f:
                f.writelines(out_lines)
            self._logger.info(f'Converted in {output_file}')
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace('''.py''', '''''')]
                self._logger.info(f'Moving {dest_folder} to {utils_file}')
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.')
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.')
| 93
| 1
|
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift a positive binary number `shift_amount` places to the left."""
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift a positive binary number `shift_amount` places to the right, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift a binary number `shift_amount` places to the right, preserving the sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number)).strip('-')[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
|
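# --- Illustration (assumes the three shift functions from the file above) ---
# The arithmetic right shift replicates the sign bit, which matches Python's
# floor-division semantics for >> on negative numbers.
assert logical_left_shift(5, 2) == "0b10100"    # 5 << 2 == 20
assert logical_right_shift(20, 2) == "0b101"    # 20 >> 2 == 5
# '0b11110' is -2 in 5-bit two's complement, and indeed -8 >> 2 == -2
assert arithmetic_right_shift(-8, 2) == "0b11110"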
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 53
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 122
|
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 122
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ['prompt']
    batch_params = ['prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 16,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 32,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''param_shapes''': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 12,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''', num_train_timesteps=1024, prediction_type='''sample''', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            '''prior''': prior,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs
    def test_shap_e(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''')
        pipe = ShapEPipeline.from_pretrained('''openai/shap-e''')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            '''a shark''', generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 238
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode='''symmetric''', data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 238
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the redundant usage line from subcommand help messages."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('<command> [<args>] ', '')
        return usage
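
# Illustrative sketch (an addition, not in the original file) of how these
# helpers are typically combined in an interactive config flow; the prompt
# strings and option lists below are hypothetical examples:
#
#   use_cpu = _ask_field(
#       "Do you want to run on CPU only? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )
#   mixed_precision = _ask_options(
#       "Do you wish to use mixed precision?",
#       ["no", "fp16", "bf16", "fp8"],
#       _convert_mixed_precision,
#   )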
| 119
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_transfo_xl'] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_transfo_xl'] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 119
| 1
|
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params

logger = getLogger(__name__)
DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    '''Save model.generate results to out_file, and return timing statistics.'''
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
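
# Illustrative call (an addition, not in the original script); the model name,
# output path and beam count are placeholders:
#
#   stats = generate_summaries_or_translations(
#       ["PARIS (Reuters) - ..."],
#       "out.txt",
#       "sshleifer/distilbart-cnn-12-6",
#       batch_size=4,
#       num_beams=4,  # forwarded to model.generate via **generate_kwargs
#   )
#   print(stats)  # {"n_obs": 1, "runtime": ..., "seconds_per_sample": ...}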
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    '''Parse CLI args, run generation on the input file and optionally score the results.'''
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 349
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19_261_885, -0.13_732_955, 0.4_119_799],
                    [0.22_150_156, -0.07_422_661, 0.39_037_204],
                    [0.22_756_018, -0.0_896_414, 0.3_701_467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
| 349
| 1
|
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums):
    '''Return the maximum sum of non-adjacent elements of nums.'''
    if not nums:
        return 0
    max_including = nums[0]  # best sum that includes the current element
    max_excluding = 0  # best sum that skips the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
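
# Worked examples (added for illustration):
#   maximum_non_adjacent_sum([1, 2, 4, 5])           ->  7   (2 + 5)
#   maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])  ->  18  (5 + 7 + 6)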
if __name__ == "__main__":
import doctest
doctest.testmod()
| 170
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 170
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
@slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
| 93
| 1
|
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 367
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 176
| 0
|
def remove_digit(num: int) -> int:
    """Return the largest number that can be formed by deleting exactly one digit of num."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(list(transposition))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 122
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 122
| 1
|
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
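
# Illustrative invocation (an addition, not in the original file); the script
# filename and argument values are assumptions, but the flags mirror the
# attributes used above (config_name, tokenizer_name, model_name, push_to_hub):
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name my-codeparrot-init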
| 263
|
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"jukebox": 5_1_2,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=["v3", "v2", "v2"] , __UpperCAmelCase=512 , __UpperCAmelCase=5 , __UpperCAmelCase="<|endoftext|>" , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
super().__init__(
unk_token=__UpperCAmelCase , n_genres=__UpperCAmelCase , version=__UpperCAmelCase , max_n_lyric_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = version
__UpperCamelCase = max_n_lyric_tokens
__UpperCamelCase = n_genres
with open(__UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__UpperCamelCase = json.load(__UpperCAmelCase )
with open(__UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__UpperCamelCase = json.load(__UpperCAmelCase )
with open(__UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__UpperCamelCase = json.load(__UpperCAmelCase )
__UpperCamelCase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
__UpperCamelCase = oov.replace(R'\-\'' , R'\-+\'' )
__UpperCamelCase = regex.compile(__UpperCAmelCase )
__UpperCamelCase = {v: k for k, v in self.artists_encoder.items()}
__UpperCamelCase = {v: k for k, v in self.genres_encoder.items()}
__UpperCamelCase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ):
'''simple docstring'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = [self.artists_encoder.get(__UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(__UpperCAmelCase ) ):
__UpperCamelCase = [self.genres_encoder.get(__UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__UpperCamelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__UpperCamelCase = [[self.lyrics_encoder.get(__UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return list(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.prepare_for_tokenization(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = self._tokenize(__UpperCAmelCase )
return artist, genre, lyrics
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__UpperCamelCase = artists[idx].lower()
__UpperCamelCase = [genres[idx].lower()]
else:
__UpperCamelCase = self._normalize(artists[idx] ) + '.v2'
__UpperCamelCase = [
self._normalize(__UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__UpperCamelCase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__UpperCamelCase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__UpperCamelCase = {vocab[index]: index + 1 for index in range(len(__UpperCAmelCase ) )}
__UpperCamelCase = 0
__UpperCamelCase = len(__UpperCAmelCase ) + 1
__UpperCamelCase = self.vocab
__UpperCamelCase = {v: k for k, v in self.vocab.items()}
__UpperCamelCase = ''
else:
__UpperCamelCase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__UpperCamelCase = self._run_strip_accents(__UpperCAmelCase )
__UpperCamelCase = lyrics.replace('\\' , '\n' )
__UpperCamelCase = self.out_of_vocab.sub('' , __UpperCAmelCase ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = unicodedata.normalize('NFD' , __UpperCAmelCase )
__UpperCamelCase = []
for char in text:
__UpperCamelCase = unicodedata.category(__UpperCAmelCase )
if cat == "Mn":
continue
output.append(__UpperCAmelCase )
return "".join(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = (
[chr(__UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(__UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(__UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__UpperCamelCase = frozenset(__UpperCAmelCase )
__UpperCamelCase = re.compile(R'_+' )
__UpperCamelCase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__UpperCamelCase = pattern.sub('_' , __UpperCAmelCase ).strip('_' )
return text
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return " ".join(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = TensorType(__UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__UpperCamelCase = tf.constant
__UpperCamelCase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__UpperCamelCase = torch.tensor
__UpperCamelCase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__UpperCamelCase = jnp.array
__UpperCamelCase = _is_jax
else:
__UpperCamelCase = np.asarray
__UpperCamelCase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__UpperCamelCase = [inputs]
if not is_tensor(__UpperCAmelCase ):
__UpperCamelCase = as_tensor(__UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="" , __UpperCAmelCase="pt" ):
'''simple docstring'''
__UpperCamelCase = [0, 0, 0]
__UpperCamelCase = [artist] * len(self.version )
__UpperCamelCase = [genres] * len(self.version )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.tokenize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self._convert_token_to_id(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = [-INFINITY] * len(full_tokens[-1] )
__UpperCamelCase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=__UpperCAmelCase ) )
__UpperCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=__UpperCAmelCase ) )
__UpperCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.artists_decoder.get(__UpperCAmelCase )
__UpperCamelCase = [self.genres_decoder.get(__UpperCAmelCase ) for genre in genres_index]
__UpperCamelCase = [self.lyrics_decoder.get(__UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 263
| 1
|
import numpy as np
class Cell:
    """A grid cell holding its position, parent link and A* costs g, h and f."""

    def __init__(self) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell) -> bool:
        return self.position == cell.position

    def showcell(self) -> None:
        print(self.position)
class Gridworld:
    """A small 2D world backed by a numpy array."""

    def __init__(self, world_size=(5, 5)) -> None:
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self) -> None:
        print(self.w)

    def get_neighbours(self, cell):
        """Return the in-bounds neighbours of a cell (8-connectivity)."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from start to goal; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2  # squared Euclidean distance heuristic
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons: mark the path cells in the world grid.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 119
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_transfo_xl'''] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_transfo_xl'''] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 119
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :List[str] , a :int ) -> List[str]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
__UpperCamelCase : int = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(a )
def _lowerCamelCase ( self :Union[str, Any] ) -> int:
__UpperCamelCase : Optional[Any] = "sshleifer/tiny-gpt2"
__UpperCamelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
__UpperCamelCase : List[Any] = PyTorchBenchmark(a )
__UpperCamelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self :Any ) -> Any:
__UpperCamelCase : Optional[Any] = "sgugger/tiny-distilbert-classification"
__UpperCamelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
__UpperCamelCase : Any = PyTorchBenchmark(a )
__UpperCamelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self :Tuple ) -> Optional[int]:
__UpperCamelCase : str = "sshleifer/tiny-gpt2"
__UpperCamelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , torchscript=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
__UpperCamelCase : List[str] = PyTorchBenchmark(a )
__UpperCamelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def _lowerCamelCase ( self :List[str] ) -> str:
__UpperCamelCase : List[str] = "sshleifer/tiny-gpt2"
__UpperCamelCase : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , fpaa=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
__UpperCamelCase : Any = PyTorchBenchmark(a )
__UpperCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
__UpperCamelCase : List[str] = "sshleifer/tiny-gpt2"
__UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained(a )
# set architectures equal to `None`
__UpperCamelCase : Optional[Any] = None
__UpperCamelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
__UpperCamelCase : Dict = PyTorchBenchmark(a , configs=[config] )
__UpperCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self :List[Any] ) -> Any:
__UpperCamelCase : int = "sshleifer/tiny-gpt2"
__UpperCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
__UpperCamelCase : Tuple = PyTorchBenchmark(a )
__UpperCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def _lowerCamelCase ( self :Optional[Any] ) -> Tuple:
__UpperCamelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
__UpperCamelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=a , multi_process=a , )
__UpperCamelCase : int = PyTorchBenchmark(a )
__UpperCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self :int ) -> List[str]:
__UpperCamelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
__UpperCamelCase : Dict = AutoConfig.from_pretrained(a )
__UpperCamelCase : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
__UpperCamelCase : Any = PyTorchBenchmark(a , configs=[config] )
__UpperCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self :Optional[Any] ) -> List[str]:
__UpperCamelCase : Tuple = "sshleifer/tinier_bart"
__UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained(a )
__UpperCamelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
__UpperCamelCase : Dict = PyTorchBenchmark(a , configs=[config] )
__UpperCamelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self :Optional[int] ) -> Optional[int]:
__UpperCamelCase : int = "sshleifer/tiny-gpt2"
__UpperCamelCase : str = AutoConfig.from_pretrained(a )
__UpperCamelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
__UpperCamelCase : str = PyTorchBenchmark(a , configs=[config] )
__UpperCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self :List[str] ) -> Any:
__UpperCamelCase : List[str] = "sshleifer/tinier_bart"
__UpperCamelCase : Any = AutoConfig.from_pretrained(a )
__UpperCamelCase : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
__UpperCamelCase : Any = PyTorchBenchmark(a , configs=[config] )
__UpperCamelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self :List[Any] ) -> List[str]:
__UpperCamelCase : Any = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , "inf_time.csv" ) , train_memory_csv_file=os.path.join(a , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(a , "inf_mem.csv" ) , train_time_csv_file=os.path.join(a , "train_time.csv" ) , env_info_csv_file=os.path.join(a , "env.csv" ) , multi_process=a , )
__UpperCamelCase : List[Any] = PyTorchBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(a , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(a , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(a , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(a , "env.csv" ) ).exists() )
def _lowerCamelCase ( self :Optional[int] ) -> List[str]:
__UpperCamelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(a :Tuple ):
self.assertTrue(hasattr(a , "sequential" ) )
self.assertTrue(hasattr(a , "cumulative" ) )
self.assertTrue(hasattr(a , "current" ) )
self.assertTrue(hasattr(a , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , "log.txt" ) , log_print=a , trace_memory_line_by_line=a , multi_process=a , )
__UpperCamelCase : Optional[Any] = PyTorchBenchmark(a )
__UpperCamelCase : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(a , "log.txt" ) ).exists() )
| 151
|
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    '''Implement tanh via the logistic form 2 / (1 + e^(-2x)) - 1.'''
    return (2 / (1 + np.exp(-2 * vector))) - 1
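
# Worked examples (added for illustration); tanh is odd and saturates at +/-1:
#   tangent_hyperbolic(np.array([0.0]))    ->  array([0.])
#   tangent_hyperbolic(np.array([100.0]))  ->  array([1.])  (saturated)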
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151
| 1
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish notation) expression of integers."""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncated towards zero.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
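
# Worked example (added for illustration): "2 1 + 3 *" is (2 + 1) * 3.
#   evaluate_postfix(["2", "1", "+", "3", "*"])  ->  9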
if __name__ == "__main__":
import doctest
doctest.testmod()
| 170
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] =["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowercase : Optional[int] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
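# Editorial note: after the _LazyModule swap above, `BartphoTokenizer` is imported
# only on first attribute access, so importing the package stays cheap even when
# sentencepiece is installed.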
| 170
| 1
|
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int = 50 ):
'''simple docstring'''
_UpperCAmelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
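# Sanity check (worked by hand): solution(5) == 15, matching the recurrence
# f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = f(1) = 1.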
| 326
|
"""simple docstring"""
import math
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ):
'''simple docstring'''
_UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i
_UpperCAmelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCAmelCase = array[temp_index - 1]
temp_index -= 1
_UpperCAmelCase = temp_index_value
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap
'''simple docstring'''
_UpperCAmelCase = index
_UpperCAmelCase = 2 * index + 1 # Left Node
_UpperCAmelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCAmelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCAmelCase = right_index
if largest != index:
_UpperCAmelCase , _UpperCAmelCase = array[largest], array[index]
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_UpperCAmelCase , _UpperCAmelCase = array[0], array[i]
heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = low
_UpperCAmelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_UpperCAmelCase , _UpperCAmelCase = array[j], array[i]
i += 1
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) == 0:
return array
_UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase = 16
return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_SCREAMING_SNAKE_CASE )
max_depth -= 1
_UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = p
return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[str] = input("Enter numbers separated by a comma : ").strip()
__A : Optional[Any] = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
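# Quick check: sort([4, 2, 6, 8, 1, 7]) returns [1, 2, 4, 6, 7, 8]; inputs shorter
# than size_threshold (16) are sorted entirely by the insertion_sort fallback.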
| 326
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
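# Editorial note: when torch or transformers is missing, the wildcard import of the
# dummy objects above substitutes stub classes that raise a clear dependency error
# when used, instead of failing at import time.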
| 14
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
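# Usage sketch (assumed input size; illustrative only):
# processor = GLPNImageProcessor(size_divisor=32)
# batch = processor.preprocess([PIL.Image.new("RGB", (65, 97))], return_tensors="np")
# batch["pixel_values"][0].shape  ->  (3, 96, 64): 97 and 65 rounded down to multiples of 32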
| 176
| 0
|
"""simple docstring"""
def _A ( _a : int ):
"""simple docstring"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
A = 1
A = 1
while repunit:
A = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _A ( _a : int = 1_0_0_0_0_0_0 ):
"""simple docstring"""
A = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_a ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 77
|
"""simple docstring"""
import pytest
UpperCAmelCase ="__dummy_dataset1__"
UpperCAmelCase ="\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def _A ( ):
"""simple docstring"""
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _A ( ):
"""simple docstring"""
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _A ( _a : str , _a : List[Any] , _a : List[Any] ):
"""simple docstring"""
A = dataset_loading_script_name
A = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=_a )
A = script_dir / f'{script_name}.py'
with open(_a , """w""" ) as f:
f.write(_a )
return str(_a )
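# Illustrative use in a test (hypothetical test body, not part of the original file):
# def test_script_dir_is_created(dataset_loading_script_dir):
#     assert dataset_loading_script_dir.endswith("__dummy_dataset1__")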
| 77
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
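# Usage sketch (illustrative hyperparameters for a smaller variant):
# config = DistilBertConfig(n_layers=3, n_heads=6, dim=384, hidden_dim=4 * 384)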
| 263
|
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def lowerCamelCase_ (UpperCamelCase__ : float ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0]
def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ):
return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
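# Spot check: Gamma(5) = 4! = 24, so gamma(5.0) evaluates to approximately 24.0.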
| 263
| 1
|
"""Boyer-Moore string search using only the bad-character heuristic."""
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of ``char`` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
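# For text "ABAABA" and pattern "AB" the matches are at indices 0 and 3, so the
# driver above prints: Pattern found in following positions: [0, 3]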
| 364
|
"""T5 model configuration."""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. """
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
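# Usage sketch (illustrative): T5Config() reproduces the t5-small geometry
# (d_model=512, d_ff=2048, num_layers=6, num_heads=8); override for larger variants.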
| 106
| 0