| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
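Each row below pairs a `code` sample with a `style_context` sample; the integer columns index the code style of each side, and `label` plausibly marks whether the two share a style, though that reading is an assumption. A minimal sketch of reading such a table with the `datasets` library; the file name is hypothetical:

```python
from datasets import load_dataset

# Hypothetical shard name -- substitute the dataset's real parquet file(s).
ds = load_dataset("parquet", data_files="code_style_pairs.parquet", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:120])  # preview of the code sample
```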
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
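
# A minimal usage sketch for the config class as reconstructed above: generic
# attribute names resolve to GPTSAN-specific ones through `attribute_map`.
config = GPTSanJapaneseConfig()
print(config.hidden_size)  # 1024 -- aliased to config.d_model
print(config.num_layers)   # 10   -- num_switch_layers + num_ext_layers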
| 25
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
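
# What the _LazyModule swap buys: importing the package stays cheap, and the
# torch-backed submodule is only materialized on first attribute access.
# (A sketch assuming a recent transformers release is installed.)
import transformers

processor_cls = transformers.InstructBlipProcessor  # triggers the lazy import
print(processor_cls.__module__)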
| 25
| 1
|
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod with O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# b divides a exactly, so integer division keeps the comparison exact.
# using the binary exponentiation function, O(log(p)):
print((a // b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using naive exponentiation, O(p):
print((a // b) % p == (a * b ** (p - 2)) % p)
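
# The checks above rest on Fermat's little theorem: for prime p and b not
# divisible by p, b**(p-2) % p is the modular inverse of b. A quick sanity
# check of that inverse:
inverse_of_b = binary_exponentiation(b, p - 2, p)
assert (b * inverse_of_b) % p == 1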
| 709
|
import unittest

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case__ = {"""input_ids""": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=snake_case__,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
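
# A short usage sketch outside the test harness (assumes hub access and the
# sentencepiece package; class name as reconstructed above).
from transformers import DebertaV2Tokenizer

tok = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
print(tok.tokenize("I was born in 92000, and this is falsé."))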
| 566
| 0
|
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
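
# A quick check against the example in the Project Euler problem statement:
# the largest prime factor of 13195 is 29.
assert solution(13195) == 29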
| 507
|
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
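
# Tracing the pair update makes the recurrence concrete: max_including is the
# best sum that uses the current element, max_excluding the best that skips it.
# For nums = [1, 2, 3]:
#   start:   max_including = 1, max_excluding = 0
#   num = 2: max_including = 0 + 2 = 2, max_excluding = max(1, 0) = 1
#   num = 3: max_including = 1 + 3 = 4, max_excluding = max(2, 1) = 2
# The answer is max(4, 2) = 4, achieved by picking 1 and 3.
print(maximum_non_adjacent_sum([1, 2, 3]))  # 4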
| 507
| 1
|
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Count the ways n pence can be made from standard UK coins."""
    return two_pound(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
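
# Sanity check against the published Project Euler 31 result: there are
# 73682 ways to make two pounds from the eight coin denominations.
assert solution(200) == 73682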
| 250
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 250
| 1
|
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants


FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms; the atom types correspond
    # to residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags: List[str] = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the standard per-residue atom layout."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a `Protein` from model features and prediction results."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 249
|
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo

        # Layer Normalization
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
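
# Hypothetical command-line invocation (all paths are placeholders):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /tmp/flax_dump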
| 249
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 262
|
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Count tile amounts t <= t_limit that form between 1 and n_limit
    different square laminae (Project Euler problem 174)."""
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # The hole must share the parity of the outer square.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
| 262
| 1
|
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_a = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_a = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
_a = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
_a = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
_a = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and per-candidate results."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
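
# Worked check of the unbiased pass@k estimator: with n=5 samples of which
# c=2 pass, pass@1 reduces to c/n = 0.4 (1 - (3/4)(4/5) = 0.4).
print(estimate_pass_at_k(np.array([5]), np.array([2]), 1))  # [0.4]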
| 19
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionInpaintPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase = frozenset([] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
snake_case_ : Dict = PNDMScheduler(skip_prk_steps=_lowercase )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
snake_case_ : Dict = CLIPTextModel(_lowercase )
snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowercase ) ).to(_lowercase )
snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Any = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : str = torch.manual_seed(_lowercase )
else:
snake_case_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline(**_lowercase )
snake_case_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = self.get_dummy_inputs(_lowercase )
snake_case_ : List[str] = sd_pipe(**_lowercase ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Optional[int] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
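
# Aside (a sketch, not part of the test above): the 2.65 GB cap is only attainable
# because `enable_sequential_cpu_offload` keeps just the currently executing
# submodule on the GPU. `enable_model_cpu_offload` is the coarser-grained, faster
# alternative when a little more VRAM is acceptable. Illustrative calls, assuming
# a pipeline object like the one above:
#
#   pipe.enable_model_cpu_offload()       # whole-model granularity, fewer transfers
#   pipe.enable_sequential_cpu_offload()  # per-submodule granularity, lowest peak VRAM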
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    """Audio feature: extract audio data from an audio file (stored as a dict with "bytes" and/or "path")."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__( self ):
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_ = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_ = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
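
# A minimal usage sketch for the feature above (public `datasets` API; the file
# path is illustrative): casting a column to `Audio` makes each row decode
# lazily into a {"path", "array", "sampling_rate"} dict on access.
#
# from datasets import Audio, Dataset
#
# ds = Dataset.from_dict({"audio": ["path/to/recording.wav"]})
# ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
# sample = ds[0]["audio"]  # decoded via decode_example()
# print(sample["sampling_rate"], sample["array"].shape)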
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first-order entropy, the second-order entropy, and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the input text into two dicts of counts: one for the frequency of
    single characters and one for the frequency of two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
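
# A compact sketch of the same first-order computation (the helper name is
# illustrative; standard library only), mirroring Shannon's H = -sum(p * log2(p)):
def first_order_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((n / total) * math.log2(n / total) for n in counts.values())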
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make the random mask reproducible across frameworks
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on a fixture image of cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make the random mask reproducible
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
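
# A minimal inference sketch for the model exercised above, using the same hub
# checkpoint as the slow test ("facebook/vit-mae-base"); the image URL is the
# public COCO sample commonly used in transformers examples:
#
# from PIL import Image
# import requests
# import torch
# from transformers import ViTImageProcessor, ViTMAEForPreTraining
#
# url = "http://images.cocodataset.org/val2017/000000039769.jpg"
# image = Image.open(requests.get(url, stream=True).raw)
# processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
# model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
# with torch.no_grad():
#     outputs = model(**processor(images=image, return_tensors="pt"))
# print(outputs.loss, outputs.logits.shape)  # reconstruction loss, per-patch logits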
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
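
# Migration sketch: the deprecation above points callers at the drop-in
# replacement class (the checkpoint id below is illustrative):
#
# from transformers import PoolFormerImageProcessor
#
# processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")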
from ..utils import DummyObject, requires_backends


# Placeholder object that imports cleanly but raises a helpful ImportError at
# first use when the PyTorch backend is unavailable.
class lowerCAmelCase__(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
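
# Behavior sketch (illustrative): the placeholder imports cleanly even when
# torch is not installed; the failure is deferred to first use, where
# `requires_backends` raises an ImportError naming the missing backend.
#
# try:
#     lowerCAmelCase__()
# except ImportError as err:
#     print(err)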
# Module-level placeholder function following the same deferred-failure pattern.
def A__(*args, **kwargs):
    requires_backends(A__, ["torch"])
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowerCAmelCase__ ( metaclass=__magic_name__ ):
'''simple docstring'''
lowercase_ = ["""torch"""]
def __init__( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __UpperCamelCase ( cls , *lowercase__ , **lowercase__ ):
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
| 516
| 1
|
from math import pow, sqrt


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    # Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
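# Minimal usage sketch with assumed values: hydrogen (2.016 g/mol) effuses
# faster than oxygen (32.0 g/mol); Graham's law gives a rate ratio of
# sqrt(32.0 / 2.016) ~= 3.984095.
if __name__ == "__main__":
    print(effusion_ratio(2.016, 32.0))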
| 562
|
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
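    # Direct call with assumed values: 0.5 m^3 submerged in water
    # (1000 kg/m^3) under standard gravity -> 1000 * 9.80665 * 0.5 = 4903.325 N
    print(archimedes_principle(1000, 0.5))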
| 562
| 1
|
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list, sort: list) -> list:
    """Perform a depth-first topological sort on the directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
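    # Sanity check (assuming neighbors are visited in list order): children
    # are appended before their parents, so "a" comes last.
    assert sort == ["c", "d", "e", "b", "a"]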
| 606
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
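# Minimal sketch of how these helpers compose (hypothetical prompt text):
# the field re-prompts until the conversion succeeds or the default is taken.
#   use_cpu = _ask_field(
#       "Do you want to run on CPU only? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )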
| 606
| 1
|
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
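# Example of the mapping in action (hand-checked): the OpenAI key
# "decoder.blocks.0.attn.query.weight" is rewritten by rename_keys below to
# "decoder.layers.0.self_attn.q_proj.weight".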
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = os.path.expanduser("~/.cache/whisper")) -> bytes:
    # the default cache directory is an assumption; pass root explicitly to override
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
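# Example invocation (hypothetical script name and paths); "tiny.en" resolves
# through _MODELS and is downloaded and checksum-verified before conversion:
#   python convert_openai_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny.en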
| 21
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowercase , unittest.TestCase ):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase : List[str] = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
UpperCAmelCase__ : str = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        # local name for the scheduler kwargs is an assumption
        scheduler_kwargs = {
            'num_train_timesteps': 1_000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((256, 256) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
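# The two-stage pattern above in brief: the prior maps the prompt to image
# embeddings, and the img2img pipeline denoises from the init image toward
# them; a low strength keeps the output close to the input. Minimal sketch:
#   emb, neg_emb = pipe_prior(prompt).to_tuple()
#   out = pipeline(prompt, image=init_image, image_embeds=emb,
#                  negative_image_embeds=neg_emb, strength=0.2).images[0]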
| 110
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a__: Union[str, Any] = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__: Union[str, Any] = ['MobileNetV2FeatureExtractor']
a__: List[Any] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__: List[Any] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
a__: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
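# Usage sketch: with the lazy module in place, top-level imports resolve the
# submodules above on first access, e.g.
#   from transformers import MobileNetV2Config, MobileNetV2Model
#   model = MobileNetV2Model(MobileNetV2Config())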
| 212
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = BertGenerationTokenizer
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB,keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
A__ = '''<s>'''
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ),__lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ),__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],'''<unk>''' )
self.assertEqual(vocab_keys[1],'''<s>''' )
self.assertEqual(vocab_keys[-1],'''<pad>''' )
self.assertEqual(len(__lowerCamelCase ),1002 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size,1000 )
def UpperCamelCase ( self ):
        A__ = BertGenerationTokenizer(SAMPLE_VOCAB,keep_accents=True )
A__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowerCamelCase,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ),[285, 46, 10, 170, 382],)
A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCamelCase,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
],)
A__ = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],)
A__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
],)
@cached_property
    def big_tokenizer(self):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase ( self ):
A__ = '''Hello World!'''
A__ = [1_8536, 2260, 101]
self.assertListEqual(__lowerCamelCase,self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def UpperCamelCase ( self ):
A__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
A__ = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(__lowerCamelCase,self.big_tokenizer.encode(__lowerCamelCase ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
A__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
A__ = ''' '''.join(__lowerCamelCase )
A__ = self.big_tokenizer.encode_plus(__lowerCamelCase,return_tensors='''pt''',return_token_type_ids=__lowerCamelCase )
A__ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence],return_tensors='''pt''',return_token_type_ids=__lowerCamelCase )
A__ = BertGenerationConfig()
A__ = BertGenerationEncoder(__lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowerCamelCase )
model(**__lowerCamelCase )
@slow
def UpperCamelCase ( self ):
# fmt: off
A__ = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase,model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''',revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''',)
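# Round-trip sketch against the sentencepiece fixture above (mirrors the
# full-tokenizer test; id 0 is <unk>, so only in-vocab pieces round-trip):
#   tok = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
#   ids = tok.convert_tokens_to_ids(tok.tokenize('This is a test'))
#   assert tok.convert_ids_to_tokens(ids) == ['▁This', '▁is', '▁a', '▁t', 'est']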
| 212
| 1
|
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Find the maximum-sum subarray that crosses the midpoint."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
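    # Worked example (computed by hand): the best subarray of
    # [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12] is arr[7:11]
    # = [18, 20, -7, 12] with sum 43.
    example = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12]
    assert max_subarray(example, 0, len(example) - 1) == (7, 10, 43)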
| 376
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
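# Instantiation sketch with small, assumed hyperparameters; the forward pass
# returns the encodings plus the unchanged mask:
#   enc = SpectrogramNotesEncoder(max_length=64, vocab_size=128, d_model=32,
#       dropout_rate=0.1, num_layers=2, num_heads=2, d_kv=16, d_ff=64,
#       feed_forward_proj="gated-gelu", is_decoder=False)
#   tokens = torch.randint(0, 128, (1, 64))
#   mask = torch.ones(1, 64, dtype=torch.long)
#   x, out_mask = enc(tokens, mask)  # x: (1, 64, 32)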
| 376
| 1
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( __lowerCamelCase,unittest.TestCase ):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = 'ssube/stable-diffusion-x4-upscaler-onnx'

    def get_dummy_inputs(self, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
def UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**lowerCamelCase__ ).images
A_ = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
A_ = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def UpperCamelCase ( self : Any ) -> int:
"""simple docstring"""
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**lowerCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
A_ = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**lowerCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
A_ = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**lowerCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
A_ = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**lowerCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
A_ = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
@property
    def gpu_provider(self):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
A_ = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ = '''A fantasy landscape, trending on artstation'''
A_ = torch.manual_seed(0 )
A_ = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCamelCase__ , output_type='''np''' , )
A_ = output.images
A_ = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
A_ = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
A_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
A_ = init_image.resize((1_2_8, 1_2_8) )
A_ = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
A_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ = '''A fantasy landscape, trending on artstation'''
A_ = torch.manual_seed(0 )
A_ = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowerCamelCase__ , output_type='''np''' , )
A_ = output.images
A_ = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
A_ = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
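# Minimal standalone sketch of the pipeline under test (CPU provider; the x4
# upscaler turns a 128x128 input into a 512x512 output):
#   pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#       'ssube/stable-diffusion-x4-upscaler-onnx', provider='CPUExecutionProvider')
#   up = pipe(prompt='A fantasy landscape', image=low_res,  # low_res: 128x128 PIL image (assumed)
#             num_inference_steps=20, output_type='np').images[0]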
| 719
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _lowercase :
def __init__( self : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str=1_3 , lowerCamelCase__ : Any=7 , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : List[str]=9_9 , lowerCamelCase__ : Optional[Any]=[1, 1, 2] , lowerCamelCase__ : Optional[Any]=1 , lowerCamelCase__ : Union[str, Any]=3_2 , lowerCamelCase__ : int=4 , lowerCamelCase__ : Optional[int]=8 , lowerCamelCase__ : Union[str, Any]=3_7 , lowerCamelCase__ : List[Any]="gelu_new" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : List[Any]=5_1_2 , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : List[Any]=0.02 , lowerCamelCase__ : str=3 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=False , ) -> Union[str, Any]:
"""simple docstring"""
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = block_sizes
A_ = num_decoder_layers
A_ = d_model
A_ = n_head
A_ = d_head
A_ = d_inner
A_ = hidden_act
A_ = hidden_dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = 2
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = initializer_std
# Used in the tests to check the size of the first attention layer
A_ = n_head
# Used in the tests to check the size of the first hidden state
A_ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
A_ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
A_ = self.num_hidden_layers + 2
def UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , ) -> List[Any]:
"""simple docstring"""
A_ = TFFunnelModel(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
A_ = [input_ids, input_mask]
A_ = model(lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
A_ = False
A_ = TFFunnelModel(config=lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
A_ = False
A_ = TFFunnelModel(config=lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def UpperCamelCase ( self : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , ) -> Tuple:
"""simple docstring"""
A_ = TFFunnelBaseModel(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
A_ = [input_ids, input_mask]
A_ = model(lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
A_ = False
A_ = TFFunnelBaseModel(config=lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
A_ = False
A_ = TFFunnelBaseModel(config=lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , ) -> List[str]:
"""simple docstring"""
A_ = TFFunnelForPreTraining(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict , ) -> List[str]:
"""simple docstring"""
A_ = TFFunnelForMaskedLM(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.num_labels
A_ = TFFunnelForSequenceClassification(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
A_ = self.num_choices
A_ = TFFunnelForMultipleChoice(config=lowerCamelCase__ )
A_ = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict , ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.num_labels
A_ = TFFunnelForTokenClassification(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : str , ) -> str:
"""simple docstring"""
A_ = TFFunnelForQuestionAnswering(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_base_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 563
| 0
|
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
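# A minimal standalone sketch of the denoising loop the tests above exercise,
# using the public EulerDiscreteScheduler API (set_timesteps / scale_model_input
# / step). The zero "model output" is a stand-in for a real UNet prediction, so
# the result is not a meaningful image.
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
print(sample.shape)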
| 51
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
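# A small hedged check of the shim above: constructing the deprecated class
# should emit a FutureWarning while still behaving as a PerceiverImageProcessor
# (this assumes argument-free construction works, as it does for most image
# processors).
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = PerceiverFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)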
| 368
| 0
|
def logical_left_shift(number: int, shift_amount: int) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """simple docstring"""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
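# Quick usage sketch of the three helpers above (values computed by hand):
# logical_left_shift appends zeros, logical_right_shift drops low bits, and
# arithmetic_right_shift replicates the sign bit of the two's-complement form.
print(logical_left_shift(5, 2))        # 0b10100
print(logical_right_shift(20, 2))      # 0b101
print(arithmetic_right_shift(-17, 2))  # 0b111011 (== -5 in 6-bit two's complement)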
| 720
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
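# A hedged illustration of what the _LazyModule indirection above provides:
# importing the package is cheap, and the heavy torch-backed modeling module is
# only imported when an attribute is first resolved (assumes transformers and
# torch are installed).
import importlib

nezha = importlib.import_module("transformers.models.nezha")
model_cls = nezha.NezhaModel  # the real modeling module is imported here
print(model_cls.__name__)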
| 247
| 0
|
'''simple docstring'''
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]
smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]
smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900,
    899, 879, 859, 840, 820, 800, 799, 766, 733, 700,
    699, 650, 600, 599, 500, 499, 400, 399, 350, 300,
    299, 266, 233, 200, 199, 179, 159, 140, 120, 100,
    99, 88, 77, 66, 55, 44, 33, 22, 11, 0,
]
smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967,
    964, 961, 957, 956, 951, 947, 942, 937, 933, 928,
    923, 919, 914, 913, 908, 903, 897, 892, 887, 881,
    876, 871, 870, 864, 858, 852, 846, 840, 834, 828,
    827, 820, 813, 806, 799, 792, 785, 784, 777, 770,
    763, 756, 749, 742, 741, 733, 724, 716, 707, 699,
    698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505,
    484, 483, 462, 440, 439, 396, 395, 352, 351, 308,
    307, 264, 263, 220, 219, 176, 132, 88, 44, 0,
]
smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979,
    977, 975, 972, 970, 968, 966, 964, 961, 959, 957,
    956, 954, 951, 949, 946, 944, 941, 939, 936, 934,
    931, 929, 926, 924, 921, 919, 916, 914, 913, 910,
    907, 905, 902, 899, 896, 893, 891, 888, 885, 882,
    879, 877, 874, 871, 870, 867, 864, 861, 858, 855,
    852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795,
    791, 788, 785, 784, 780, 777, 774, 770, 766, 763,
    760, 756, 752, 749, 746, 742, 741, 737, 733, 730,
    726, 722, 718, 714, 710, 707, 703, 699, 698, 694,
    690, 685, 681, 677, 673, 669, 664, 660, 656, 655,
    650, 646, 641, 636, 632, 627, 622, 618, 613, 612,
    607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505,
    498, 491, 484, 483, 474, 466, 457, 449, 440, 439,
    428, 418, 407, 396, 395, 381, 366, 352, 351, 330,
    308, 307, 286, 264, 263, 242, 220, 219, 176, 175,
    132, 131, 88, 44, 0,
]
super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]
super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935,
    928, 921, 914, 907, 900, 899, 879, 859, 840, 820,
    800, 799, 766, 733, 700, 699, 650, 600, 599, 500,
    499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]
super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968,
    965, 961, 958, 955, 951, 948, 944, 941, 938, 934,
    931, 927, 924, 920, 917, 914, 910, 907, 903, 900,
    899, 891, 884, 876, 869, 861, 853, 846, 838, 830,
    823, 815, 808, 800, 799, 788, 777, 766, 755, 744,
    733, 722, 711, 700, 699, 688, 677, 666, 655, 644,
    633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400,
    399, 379, 359, 340, 320, 300, 299, 279, 259, 240,
    220, 200, 199, 166, 133, 100, 99, 66, 33, 0,
]
| 274
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''

        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        '''simple docstring'''
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_small_model_tf(self):
        '''simple docstring'''
        pass
@require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        '''simple docstring'''
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_large_model_tf(self):
        '''simple docstring'''
        pass
@require_torch
@slow
    def test_threshold(self):
        '''simple docstring'''
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        '''simple docstring'''
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
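# A minimal usage sketch of the pipeline exercised by the tests above, assuming
# network access to fetch the tiny test checkpoint; labels and checkpoint mirror
# the small-model test.
from transformers import pipeline

detector = pipeline(
    "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
)
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.1,
)
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])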
| 460
| 0
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args: Namespace) -> "ConvertCommand":
    '''simple docstring'''
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 721
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 415
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/realm-cc-news-pretrained-embedder': 512,
'google/realm-cc-news-pretrained-encoder': 512,
'google/realm-cc-news-pretrained-scorer': 512,
'google/realm-cc-news-pretrained-openqa': 512,
'google/realm-orqa-nq-openqa': 512,
'google/realm-orqa-nq-reader': 512,
'google/realm-orqa-wq-openqa': 512,
'google/realm-orqa-wq-reader': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
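# A brief usage sketch of batch_encode_candidates above: each example carries
# several candidate texts and everything is padded to max_length, giving a
# (batch, num_candidates, seq_len) array (assumes the checkpoint is reachable).
tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
batch = tokenizer.batch_encode_candidates(
    [["first candidate", "second candidate"], ["another first", "another second"]],
    max_length=10,
    return_tensors="np",
)
print(batch["input_ids"].shape)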
| 206
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
@cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
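# A standalone sketch of the SM_HP_MP_PARAMETERS check performed above,
# assuming the environment variable carries JSON the way the SageMaker
# launcher sets it.
if __name__ == "__main__":
    os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
    smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
    print("model parallel requested:", "partitions" in smp_options)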
| 206
| 1
|
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"https://google.com{link.get('href')}")
| 523
|
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
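    # A tiny numeric check of the math above: for the degree-1 curve through
    # (1, 2) and (3, 5), the point at t = 0.5 is exactly the midpoint.
    assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)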
| 523
| 1
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 184
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    '''simple docstring'''

    def __init__(self):
        '''simple docstring'''
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        '''simple docstring'''
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        '''simple docstring'''
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        '''simple docstring'''
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
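# A hedged usage sketch of the helpers above around an arbitrary workload;
# the GPU entries are simply absent when no CUDA device is visible.
if __name__ == "__main__":
    start_measures = start_measure()
    _ = [i * i for i in range(1_000_000)]  # placeholder workload
    measures = end_measure(start_measures)
    log_measures(measures, "squares benchmark")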
| 184
| 1
|
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
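# A hedged sketch of the processor above with a toy waveform; real use would
# pass PIL images / video frames plus 1-D audio at the extractor's sampling
# rate (the 44100 Hz default is assumed here).
import numpy as np
from transformers import TvltFeatureExtractor, TvltImageProcessor

processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
audio = np.random.randn(44100).astype(np.float32)
inputs = processor(audio=audio, sampling_rate=44100)
print(list(inputs.keys()))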
| 443
|
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """simple docstring"""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    """simple docstring"""
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
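# Sanity check of the closed form above: for the arithmetic series
# 1 + 2 + ... + 10, n/2 * (2a + (n - 1)d) = 5 * (2 + 9) = 55.
assert sum_of_series(1, 1, 10) == 55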
| 443
| 1
|
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )
    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
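# A standalone sketch of the contiguous-split arithmetic used by
# _map_with_multiprocessing_pool above: the first `mod` shards receive one
# extra item, so shard sizes differ by at most one.
def contiguous_splits(n_items: int, n_shards: int) -> list:
    div, mod = divmod(n_items, n_shards)
    splits = []
    for index in range(n_shards):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(list(range(start, end)))
    return splits


print(contiguous_splits(7, 3))  # [[0, 1, 2], [3, 4], [5, 6]]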
| 368
|
import random
def rabin_miller(num: int) -> bool:
    '''simple docstring'''
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    '''simple docstring'''
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    '''simple docstring'''
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
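    # Quick usage sketch: the primality helpers above agree with known small
    # cases, and the generator returns a probable prime of the requested bit
    # length.
    assert is_prime_low_num(997)
    assert not is_prime_low_num(1000)
    small_prime = generate_large_prime(keysize=16)
    print(small_prime, small_prime.bit_length())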
| 35
| 0
|
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 367
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def A__ ( self :Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self :List[str] , __snake_case :Any , __snake_case :List[Any]=False ):
'''simple docstring'''
if not batched:
__magic_name__ : Any =image_inputs[0]
if isinstance(__snake_case , Image.Image ):
__magic_name__ , __magic_name__ : Dict =image.size
else:
__magic_name__ , __magic_name__ : Dict =image.shape[1], image.shape[2]
if w < h:
__magic_name__ : Any =int(self.size["""shortest_edge"""] * h / w )
__magic_name__ : Dict =self.size["""shortest_edge"""]
elif w > h:
__magic_name__ : Union[str, Any] =self.size["""shortest_edge"""]
__magic_name__ : Optional[int] =int(self.size["""shortest_edge"""] * w / h )
else:
__magic_name__ : Any =self.size["""shortest_edge"""]
__magic_name__ : List[str] =self.size["""shortest_edge"""]
else:
__magic_name__ : List[str] =[]
for image in image_inputs:
__magic_name__ , __magic_name__ : Tuple =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__magic_name__ : Optional[int] =max(__snake_case , key=lambda __snake_case : item[0] )[0]
__magic_name__ : Tuple =max(__snake_case , key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ : int =DetaImageProcessingTester(self )
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """image_mean""" ) )
self.assertTrue(hasattr(__snake_case , """image_std""" ) )
self.assertTrue(hasattr(__snake_case , """do_normalize""" ) )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """do_rescale""" ) )
self.assertTrue(hasattr(__snake_case , """do_pad""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , __snake_case )
def A__ ( self :Any ):
'''simple docstring'''
pass
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Tuple =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__magic_name__ : Tuple =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : str =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ , __magic_name__ : int =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
__magic_name__ : Dict =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : int =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
__magic_name__ : str =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : int =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : List[str] =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : Any =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
__magic_name__ : Tuple =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : int =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Dict =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : Tuple =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__magic_name__ : Optional[int] =json.loads(f.read() )
__magic_name__ : str ={"""image_id""": 3_97_69, """annotations""": target}
# encode them
__magic_name__ : Optional[Any] =DetaImageProcessor()
__magic_name__ : Any =image_processing(images=__snake_case , annotations=__snake_case , return_tensors="""pt""" )
# verify pixel values
__magic_name__ : Tuple =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , __snake_case )
__magic_name__ : Optional[int] =torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
__magic_name__ : Dict =torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __snake_case ) )
# verify boxes
__magic_name__ : Optional[int] =torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __snake_case )
__magic_name__ : int =torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __snake_case , atol=1E-3 ) )
# verify image_id
__magic_name__ : Any =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __snake_case ) )
# verify is_crowd
__magic_name__ : Optional[Any] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __snake_case ) )
# verify class_labels
__magic_name__ : List[str] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __snake_case ) )
# verify orig_size
__magic_name__ : Optional[Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __snake_case ) )
# verify size
__magic_name__ : Tuple =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __snake_case ) )
@slow
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__magic_name__ : str =json.loads(f.read() )
__magic_name__ : Optional[Any] ={"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
__magic_name__ : List[str] =pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__magic_name__ : int =DetaImageProcessor(format="""coco_panoptic""" )
__magic_name__ : Tuple =image_processing(images=__snake_case , annotations=__snake_case , masks_path=__snake_case , return_tensors="""pt""" )
# verify pixel values
__magic_name__ : Union[str, Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , __snake_case )
__magic_name__ : str =torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
__magic_name__ : str =torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __snake_case ) )
# verify boxes
__magic_name__ : Dict =torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __snake_case )
__magic_name__ : Optional[Any] =torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __snake_case , atol=1E-3 ) )
# verify image_id
__magic_name__ : Dict =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __snake_case ) )
# verify is_crowd
__magic_name__ : int =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __snake_case ) )
# verify class_labels
__magic_name__ : List[Any] =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __snake_case ) )
# verify masks
__magic_name__ : List[Any] =82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __snake_case )
# verify orig_size
__magic_name__ : Union[str, Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __snake_case ) )
# verify size
__magic_name__ : List[Any] =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __snake_case ) )
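

# Usage sketch (illustrative, not part of the test suite): outside the tests,
# the processor turns a PIL image into padded, normalized pixel values.
# from PIL import Image
# from transformers import DetaImageProcessor
# processor = DetaImageProcessor()
# encoding = processor(images=Image.open("cat.png"), return_tensors="pt")
# print(encoding["pixel_values"].shape)  # e.g. torch.Size([1, 3, 800, 1066])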
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        """Return the dataset, either streaming or fully materialized."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
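

# Usage sketch (illustrative): load a plain-text file, one example per line.
# ds = TextDatasetReader("corpus.txt").read()
# print(ds[0]["text"])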
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
def get_resize_output_image_size(input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int):
    """Compute the (height, width) to resize to, snapped to a multiple."""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
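

# Worked example (a sketch): for a 480x640 input and output_size=(384, 384)
# with keep_aspect_ratio=True and multiple=32, the scale closer to 1 is the
# height factor 384/480 = 0.8, so get_resize_output_image_size returns
# (384, 512): the height fits exactly and the width scales by the same 0.8.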
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100) -> float:
    """Approximate the area between ``fnc`` and the x-axis on [x_start, x_end]."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area


if __name__ == "__main__":

    def f(x: int | float) -> int | float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
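
    # Sanity check: the exact area of |x^3 + x^2| over [-5, 5] is
    # 1376/12 + 1/12 + 2375/12 = 3752/12 = 312.666..., which the printed
    # approximations should approach as the step count grows.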
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
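

# How the lazy pattern above works (sketch): at import time only the
# _import_structure mapping is built; sys.modules[__name__] is swapped for a
# _LazyModule proxy, and the heavy torch/tokenizer submodules are imported the
# first time an attribute such as GPTNeoXModel is actually accessed.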
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """Build a ``dict.__getitem__`` operation tuple."""
    return getitem, k


def _set(k, v):
    """Build a ``dict.__setitem__`` operation tuple."""
    return setitem, k, v


def _del(k):
    """Build a ``dict.__delitem__`` operation tuple."""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Apply ``fun`` to ``obj`` and return ``(result, exception)``."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
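

# Usage sketch (illustrative): each parametrized case replays the same
# operation sequence against HashMap and a plain dict, e.g.
# my, py = HashMap(initial_block_size=4), {}
# fun, *args = _set("key_a", "val_a")
# _run_operation(my, fun, *args); _run_operation(py, fun, *args)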
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_snake_case = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(self, feature_size: int = 1, sampling_rate: int = 24000, padding_value: float = 0.0, chunk_length_s: float = None, overlap: float = None, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(self, raw_audio, padding=None, truncation=False, max_length=None, return_tensors=None, sampling_rate=None) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding)
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
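

# Usage sketch (illustrative): turn a one-second mono waveform into model inputs.
# import numpy as np
# extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
# inputs = extractor(np.zeros(24000, dtype=np.float32), sampling_rate=24000, return_tensors="np")
# print(inputs["input_values"].shape)  # expected (1, 1, 24000)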
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0) -> None:
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt ``content`` character by character with a XOR key."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Decrypt a list of XOR-encrypted characters (XOR is symmetric)."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
def surface_area_dodecahedron(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('''Length must be a positive.''')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def volume_dodecahedron(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('''Length must be a positive.''')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
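
# Quick check (edge = 1): surface_area_dodecahedron(1) ≈ 20.6457 and
# volume_dodecahedron(1) ≈ 7.6631, the known constants for a unit regular
# dodecahedron.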
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__snake_case = KandinskyVaaInpaintPipeline
__snake_case = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
__snake_case = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
__snake_case = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__snake_case = False
@property
def __lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.time_input_dim
@property
def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
return 100
@property
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
a = UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def __lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.dummy_unet
a = self.dummy_movq
a = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__UpperCAmelCase , )
a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : int=0 ) ->Optional[int]:
"""simple docstring"""
a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCAmelCase )
# create init_image
a = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
a = np.ones((64, 64) , dtype=np.floataa )
a = 0
if str(__UpperCAmelCase ).startswith('''mps''' ):
a = torch.manual_seed(__UpperCAmelCase )
else:
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.images
a = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
a = image[0, -3:, -3:, -1]
a = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
a = np.array(
[0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyVaaInpaintPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
a = np.ones((768, 768) , dtype=np.floataa )
a = 0
a = '''a hat'''
a = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
a = KandinskyVaaInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.floataa )
a = pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
a = torch.Generator(device='''cpu''' ).manual_seed(0 )
a , a = pipe_prior(
__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
a = pipeline(
image=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
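

# Usage sketch (illustrative, mirroring the slow test above): the prior
# pipeline turns a prompt into image embeddings, which the inpaint decoder
# consumes together with an init image and mask.
# image_embeds, negative_image_embeds = pipe_prior("a hat", num_inference_steps=5).to_tuple()
# output = pipeline(image=init_image, mask_image=mask, image_embeds=image_embeds,
#                   negative_image_embeds=negative_image_embeds, output_type="np")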
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=2 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = num_patches + 2
def UpperCamelCase__ ( self ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = DeiTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = DeiTForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = DeiTForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.type_sequence_label_size
snake_case_ = DeiTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = DeiTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__snake_case = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__snake_case = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase__ ( self ):
snake_case_ = DeiTModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
snake_case_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
snake_case_ = model(**_UpperCAmelCase ).loss
loss.backward()
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case_ = False
snake_case_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
snake_case_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
snake_case_ = model(**_UpperCAmelCase ).loss
loss.backward()
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
snake_case_ = problem_type['''title''']
snake_case_ = problem_type['''num_labels''']
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
snake_case_ = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
snake_case_ = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
snake_case_ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCamelCase__ ( self ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = DeiTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    """Load the standard COCO test image used by the integration tests."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
snake_case_ = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
_UpperCAmelCase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**_UpperCAmelCase )
# verify the logits
snake_case_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
snake_case_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self ):
snake_case_ = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
snake_case_ = inputs.pixel_values.to(_UpperCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case_ = model(_UpperCAmelCase )
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowercase : List[Any] = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Cosine similarity between each image embedding and each concept embedding."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
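# Illustrative sketch (not part of the checker): the per-concept decision above is just
# `cosine_similarity - threshold + adjustment > 0`, with the adjustment raised to 0.01
# once any "special care" concept fires. The numbers below are made up for illustration.
if __name__ == "__main__":
    cos_sim = torch.tensor([[0.21, 0.05, 0.40]])   # similarities of one image to 3 concepts
    thresholds = torch.tensor([0.20, 0.30, 0.35])  # per-concept thresholds
    adjustment = 0.01                              # pretend a special-care concept already fired
    scores = cos_sim - thresholds + adjustment
    print(torch.any(scores > 0, dim=1))            # tensor([True]) -> image would be flagged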
| 422
|
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns, for the given minimum block length, the least value of n for which
    the fill-count function first exceeds one million.

    >>> solution(3)
    30
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(f'''{solution() = }''')
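# Hypothetical sanity check (not part of the original solution): brute-force enumeration
# of valid rows for small sizes, to cross-check the recurrence used by `solution`. The
# Project Euler statement gives exactly 17 fillings for block length 3 in a row of 7.
def brute_force_fill_count(min_block_length: int, length: int) -> int:
    """Count rows where every maximal run of filled cells has length >= min_block_length."""
    from itertools import product

    total = 0
    for bits in product((0, 1), repeat=length):
        runs, run = [], 0
        for bit in bits:
            if bit:
                run += 1
            elif run:
                runs.append(run)
                run = 0
        if run:
            runs.append(run)
        if all(r >= min_block_length for r in runs):
            total += 1
    return total


if __name__ == "__main__":
    assert brute_force_fill_count(3, 7) == 17  # matches the problem statement example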
| 405
| 0
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt"
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs

        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
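# Minimal retrieval sketch (illustrative only; mirrors the dummy setup above rather
# than a real wiki_dpr index): given question hidden states of shape
# (batch, retrieval_vector_size), `RagRetriever.retrieve` returns document embeddings,
# document ids, and document dicts, exactly as exercised by the tests.
#
#   retriever = ...  # e.g. built as in get_dummy_canonical_hf_index_retriever above
#   hidden_states = np.ones((1, 8), dtype=np.float32)
#   retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=1)
#   print(doc_dicts[0]["text"])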
| 710
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
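# Worked numbers for the test above (illustration only; the helper below is not part of
# the real builder): with 8-byte int64 rows and max_shard_size=16, each partition holds
# 16 // 8 = 2 rows, so 100 rows need ceil(100 / 2) = 50 partitions.
def _expected_partition_count(num_rows: int, row_bytes: int, max_shard_size: int) -> int:
    rows_per_partition = max(1, max_shard_size // row_bytes)
    return -(-num_rows // rows_per_partition)  # ceil division


assert _expected_partition_count(100, 8, 16) == 50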
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 25
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4,
        depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True,
        use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
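# Usage sketch (illustrative, not part of the tests): `post_process_semantic_segmentation`
# collapses the (num_labels, H/4, W/4) logits into a per-pixel label map, optionally
# resized to `target_sizes`. The checkpoint name is reused from the tests above.
#
#   processor = SegformerImageProcessor()
#   model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[(500, 300)])[0]
#   # seg_map is a (500, 300) LongTensor of ADE20K class indices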
| 526
|
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
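# Worked example (illustrative): with the classic instance below, greedy-by-ratio takes
# all of items 0 and 1 and two thirds of item 2, for a best value of 240.
if __name__ == "__main__":
    value = [60, 100, 120]
    weight = [10, 20, 30]
    max_value, fractions = fractional_knapsack(value, weight, capacity=50)
    print(max_value)  # 240.0
    print(fractions)  # [1, 1, 0.6666666666666666]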
| 526
| 1
|
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 707
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
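# Equivalent pipeline formulation (a sketch, not part of the original script): sklearn's
# `make_pipeline` chains the degree-4 feature expansion and the linear fit into a single
# estimator, avoiding the manual fit_transform call at prediction time.
#
#   from sklearn.pipeline import make_pipeline
#   model = make_pipeline(PolynomialFeatures(degree=4), LinearRegression())
#   model.fit(X, y)
#   model.predict([[5.5]])  # same prediction as pol_reg above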
| 260
| 0
|
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # A smooth map onto the positive half-line: (x + sqrt(x^2 + 4)) / 2
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
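# Usage sketch (illustrative, not part of the module): project features to Student-T
# parameters with `get_parameter_projection`, then build a location/scale-adjusted
# distribution. Shapes are made up for the example.
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=32)
    features = torch.randn(4, 32)      # e.g. decoder hidden states
    distr_args = projection(features)  # (df, loc, scale) after domain_map
    distribution = output.distribution(distr_args, loc=torch.zeros(4), scale=torch.ones(4))
    print(distribution.sample().shape)  # torch.Size([4])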
| 75
|
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Apply an X (NOT) gate to two qubits and measure them."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 631
| 0
|
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
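# Extra illustrative case (a sketch, not one of the upstream cases; it assumes
# `get_imports` de-duplicates top-level names, so aliased and `from`-style imports
# collapse to their root package):
def test_multiple_imports(tmp_path):
    code = "import os\nimport numpy as np\nfrom torch import nn\n"
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(code)

    parsed_imports = get_imports(tmp_file_path)
    assert sorted(parsed_imports) == ["numpy", "os", "torch"]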
| 122
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
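# Usage sketch (illustrative; the checkpoint name is an assumption, any CLIP checkpoint
# with a tokenizer and image processor works):
#
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(
#       text=["a photo of a cat", "a photo of a dog"],
#       images=Image.new("RGB", (224, 224)),
#       return_tensors="pt",
#       padding=True,
#   )
#   sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']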
| 122
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple dense matrix supporting +, -, *, transpose and Sherman-Morrison updates."""

    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]
    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> float:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        # Validation
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Matrix | None:
        # Validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def __lowercase ( ) -> Any:
'''simple docstring'''
_A = Matrix(3 , 3 , 0 )
for i in range(3 ):
_A = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
_A = Matrix(3 , 1 , 0 )
_A , _A , _A = 1, 2, -3
_A = Matrix(3 , 1 , 0 )
_A , _A , _A = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(UpperCAmelCase__ , UpperCAmelCase__ )}''' )
def __lowercase ( ) -> Dict:
'''simple docstring'''
import doctest
doctest.testmod()
testa()
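# Note on the identity (illustration): for an invertible A and column vectors u, v,
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# which is what `sherman_morrison` computes, with `self` playing the role of A^(-1).
# The `numerator_factor == 0` guard covers 1 + v^T A^(-1) u = 0, in which case
# A + u v^T is singular and no inverse exists.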
| 330
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Optional[int] = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 5_12,
'''squeezebert/squeezebert-mnli''': 5_12,
'''squeezebert/squeezebert-mnli-headless''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
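# Minimal usage sketch (added illustration, not part of the original module; the
# checkpoint name comes from the pretrained map above):
#
#     tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#     encoded = tokenizer("Hello world", return_tensors="np")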
| 403
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 447
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the single-character entropy, the character-pair entropy, and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and two-character sequences in `text`."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
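# Added note (illustration, not part of the original algorithm file): the quantity
# printed above is the Shannon entropy H = -sum(p * log2(p)). As a tiny worked
# example, a two-symbol source with p = 0.5 for each symbol gives
# H = -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit per symbol.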
| 447
| 1
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU activation elementwise, i.e. max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 427
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
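# Minimal usage sketch (added illustration; `model` and `input_ids` are hypothetical,
# not part of this module):
#
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#     outputs = model.generate(input_ids, stopping_criteria=criteria)
#
# Generation stops as soon as any criterion in the list returns True.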
| 415
| 0
|
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Apply one step of Conway's Game of Life rules to the grid."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generate a list of images of subsequent Game of Life generations."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 364
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
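# Minimal usage sketch (added illustration; `raw_audio` is a hypothetical 1-D numpy
# array of mono samples, not part of this module):
#
#     feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
#     inputs = feature_extractor(raw_audio, sampling_rate=24_000, return_tensors="pt")
#     # inputs["input_values"] then has shape (batch, channels, padded_length)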
| 364
| 1
|
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n`."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f'''{solution() = }''')
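# Worked example (added illustration): for n = 10 the multiples of 3 or 5 below 10
# are 3, 5, 6 and 9, so solution(10) == 23.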
| 150
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
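# Example invocation (added illustration; the script name and model identifiers are
# hypothetical, chosen only to match the flags defined above):
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-consolidated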
| 150
| 1
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
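# Minimal usage sketch (added illustration; "vocab.json" is a hypothetical local file):
#
#     tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
#     ids = tokenizer("hello", add_special_tokens=False)["input_ids"]
#
# The tokenizer splits text into single characters, so "hello" yields one id per letter.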
| 703
|
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 385
| 0
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 699
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
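# Minimal usage sketch (added illustration; `images` is a hypothetical batch of
# tensors, not part of this module):
#
#     watermarker = StableDiffusionXLWatermarker()
#     watermarked = watermarker.apply_watermark(images)  # images: (B, C, H, W) in [-1, 1]
#
# The "dwtDct" method hides the 48-bit message in the frequency domain of each image.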
| 699
| 1
|
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move `height` disks from `from_pole` to `to_pole`."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    """Print a single disk move."""
    print("moving disk from", fp, "to", tp)


def main() -> None:
    """Read the tower height from the user and run the puzzle."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
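# Added note (illustration, not part of the original file): moving a tower of
# height h takes 2**h - 1 disk moves; for h = 3 the recursion above prints 7 moves.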
| 447
|
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move `height` disks from `from_pole` to `to_pole`."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    """Print a single disk move."""
    print("moving disk from", fp, "to", tp)


def main() -> None:
    """Read the tower height from the user and run the puzzle."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
| 447
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 683
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 526
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 553
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 553
| 1
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 297
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
| 479
| 0
|
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between the `start` and `goal` nodes."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance between the `start` and `target` nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
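    # Added illustration (not in the original): both helpers short-circuit the
    # trivial case where start and goal coincide.
    print(bfs_shortest_path(demo_graph, "G", "G"))  # returns ['G']
    print(bfs_shortest_path_distance(demo_graph, "G", "G"))  # returns 0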
| 700
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
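# Usage sketch (illustrative, not from the original module): the converters above
# are meant to be passed as `convert_value` to the prompt helpers, e.g.
#   mixed_precision = _ask_options(
#       "Do you wish to use mixed precision?",
#       ["no", "fp16", "bf16", "fp8"],
#       _convert_mixed_precision,
#   )
# The prompt wording is an assumption modeled on accelerate's `config` flow.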
| 255
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
    GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert')
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['qformer_' + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
| 101
|
def excel_title_to_column(column_title: str) -> int:
    """Given an Excel-style column title (e.g. "AB"), return its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
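    # Worked examples (added for illustration): each letter contributes
    # (ord(letter) - 64) * 26**power, with powers counted from the right.
    print(excel_title_to_column("AB"))  # 1 * 26 + 2 = 28
    print(excel_title_to_column("ZZ"))  # 26 * 26 + 26 = 702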
| 1
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
A : Optional[int] = MvpTokenizer
A : Optional[int] = MvpTokenizerFast
A : Optional[int] = True
A : Tuple = filter_roberta_detectors
def _lowerCamelCase (self ) -> Union[str, Any]:
super().setUp()
lowercase_ : Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowercase_ : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
lowercase_ : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowercase_ : List[str] = {'unk_token': '<unk>'}
lowercase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_a ) )
def _lowerCamelCase (self , **_a ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def _lowerCamelCase (self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def _lowerCamelCase (self , _a ) -> Optional[int]:
return "lower newer", "lower newer"
@cached_property
def _lowerCamelCase (self ) -> Any:
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def _lowerCamelCase (self ) -> int:
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def _lowerCamelCase (self ) -> Tuple:
lowercase_ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowercase_ : int = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : Optional[Any] = tokenizer(_a , max_length=len(_a ) , padding=_a , return_tensors='pt' )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase_ : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(_a , _a )
# Test that special tokens are reset
@require_torch
def _lowerCamelCase (self ) -> Any:
lowercase_ : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : Optional[int] = tokenizer(_a , padding=_a , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , _a )
self.assertIn('attention_mask' , _a )
self.assertNotIn('labels' , _a )
self.assertNotIn('decoder_attention_mask' , _a )
@require_torch
def _lowerCamelCase (self ) -> Optional[int]:
lowercase_ : Optional[int] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : str = tokenizer(text_target=_a , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def _lowerCamelCase (self ) -> str:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : Any = tokenizer(
['I am a small frog' * 1_024, 'I am a small frog'] , padding=_a , truncation=_a , return_tensors='pt' )
self.assertIsInstance(_a , _a )
self.assertEqual(batch.input_ids.shape , (2, 1_024) )
@require_torch
def _lowerCamelCase (self ) -> List[str]:
lowercase_ : Union[str, Any] = ['A long paragraph for summarization.']
lowercase_ : Dict = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : str = tokenizer(_a , text_target=_a , return_tensors='pt' )
lowercase_ : Optional[int] = inputs['input_ids']
lowercase_ : Union[str, Any] = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _lowerCamelCase (self ) -> str:
pass
def _lowerCamelCase (self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowercase_ : Optional[Any] = self.tokenizer_class.from_pretrained(_a , **_a )
lowercase_ : Dict = 'A, <mask> AllenNLP sentence.'
lowercase_ : List[Any] = tokenizer_r.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
lowercase_ : Any = tokenizer_p.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowercase_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowercase_ : int = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
_a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 720
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported')
    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 527
        filename = 'audioset-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace('module.v', 'audio_spectrogram_transformer')
    if "cls_token" in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if "dist_token" in name:
        name = name.replace('dist_token', 'embeddings.distillation_token')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    # transformer blocks
    if "blocks" in name:
        name = name.replace('blocks', 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('audio_spectrogram_transformer.norm', 'audio_spectrogram_transformer.layernorm')
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('module.mlp_head.0', 'classifier.layernorm')
    if "module.mlp_head.1" in name:
        name = name.replace('module.mlp_head.1', 'classifier.dense')
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        'module.v.head.weight',
        'module.v.head.bias',
        'module.v.head_dist.weight',
        'module.v.head_dist.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if 'speech-commands' not in model_name else -6.845978
    std = 4.5689974 if 'speech-commands' not in model_name else 5.5654526
    max_length = 1024 if 'speech-commands' not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset('speech_commands', 'v0.02', split='validation')
        waveform = dataset[0]['audio']['array']
    else:
        filepath = hf_hub_download(
            repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset', )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors='pt')
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase_ : int = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase_ : Optional[int] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase_ : Optional[Any] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase_ : List[str] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase_ : List[str] = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase_ : Any = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase_ : List[str] = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase_ : Optional[Any] = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('Unknown model name' )
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving feature extractor to {pytorch_dump_folder_path}')
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model and feature extractor to the hub...')
        model.push_to_hub(f'MIT/{model_name}')
        feature_extractor.push_to_hub(f'MIT/{model_name}')
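# Usage sketch (illustrative): a typical conversion run looks like
#   python convert_audio_spectrogram_transformer.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-dump
# The script filename is an assumption; the model name must be one of the keys
# of `model_name_to_url` above.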
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 438
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
ADAPTER_CONFIG_NAME = 'adapter_config.json'
ADAPTER_WEIGHTS_NAME = 'adapter_model.bin'
ADAPTER_SAFE_WEIGHTS_NAME = 'adapter_model.safetensors'
TF2_WEIGHTS_NAME = 'tf_model.h5'
TF2_WEIGHTS_INDEX_NAME = 'tf_model.h5.index.json'
TF_WEIGHTS_NAME = 'model.ckpt'
FLAX_WEIGHTS_NAME = 'flax_model.msgpack'
FLAX_WEIGHTS_INDEX_NAME = 'flax_model.msgpack.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
CONFIG_NAME = 'config.json'
FEATURE_EXTRACTOR_NAME = 'preprocessor_config.json'
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = 'generation_config.json'
MODEL_CARD_NAME = 'modelcard.json'
SENTENCEPIECE_UNDERLINE = '▁'
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = F"This example requires a minimum version of {min_version},"
        error_message += F" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
            """versions of HuggingFace Transformers.""" )
| 301
|
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse `path` (edge list: node1 node2 distance) into a dict of neighbour lists."""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node named in `path`."""
    with open(path) as f:
        start_node = f.read(1)
        end_node = start_node
        first_solution = []
        visiting = start_node
        distance_of_first_solution = 0
        while visiting not in first_solution:
            minim = 10000
            for k in dict_of_neighbours[visiting]:
                if int(k[1]) < int(minim) and k[0] not in first_solution:
                    minim = k[1]
                    best_node = k[0]
            first_solution.append(visiting)
            distance_of_first_solution = distance_of_first_solution + int(minim)
            visiting = best_node
        first_solution.append(end_node)
        position = 0
        for k in dict_of_neighbours[first_solution[-2]]:
            if k[0] == start_node:
                break
            position += 1
        distance_of_first_solution = (
            distance_of_first_solution
            + int(dict_of_neighbours[first_solution[-2]][position][1])
            - 10000
        )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Build all 2-swap neighbours of `solution`, each with its total distance appended."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
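# Usage sketch (illustrative): the script expects an edge-list file and is run as
#   python tabu_search.py -f edges.txt -i 100 -s 5
# where -i bounds the number of iterations and -s the tabu-list size; the file
# name and numbers are examples only.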
| 301
| 1
|
"""simple docstring"""
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def str_eval(s):
    """Return the product of the digits in string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n = N):
    """Find the thirteen adjacent digits in `n` with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(lowercase):
    name = lowercase
    if "cls_token" in name:
        name = name.replace("""cls_token""", """vit.embeddings.cls_token""")
    if "mask_token" in name:
        name = name.replace("""mask_token""", """decoder.mask_token""")
    if "decoder_pos_embed" in name:
        name = name.replace("""decoder_pos_embed""", """decoder.decoder_pos_embed""")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("""pos_embed""", """vit.embeddings.position_embeddings""")
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""", """vit.embeddings.patch_embeddings.projection""")
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""", """vit.embeddings.norm""")
    if "decoder_blocks" in name:
        name = name.replace("""decoder_blocks""", """decoder.decoder_layers""")
    if "blocks" in name:
        name = name.replace("""blocks""", """vit.encoder.layer""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "attn" in name:
        name = name.replace("""attn""", """attention.self""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")
    if "decoder_embed" in name:
        name = name.replace("""decoder_embed""", """decoder.decoder_embed""")
    if "decoder_norm" in name:
        name = name.replace("""decoder_norm""", """decoder.decoder_norm""")
    if "decoder_pred" in name:
        name = name.replace("""decoder_pred""", """decoder.decoder_pred""")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("""norm.weight""", """vit.layernorm.weight""")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("""norm.bias""", """vit.layernorm.bias""")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(""".""")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = """decoder.decoder_layers."""
            else:
                dim = config.hidden_size
                prefix = """vit.encoder.layer."""
            if "weight" in key:
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""model"""]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="""pt""")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 36
| 1
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                } ), codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"], reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ], )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
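# Usage sketch (mirrors the examples embedded in _KWARGS_DESCRIPTION above):
#   rouge = datasets.load_metric('rouge')
#   results = rouge.compute(predictions=['hello there'], references=['hello there'])
#   results['rouge1'].mid.fmeasure  # -> 1.0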
| 629
|
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
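# Usage sketch (illustrative): `fire` exposes `minify` as a CLI, so a typical
# invocation is
#   python minify_dataset.py SRC_DIR DEST_DIR 100
# which writes the first 100 lines of every file in SRC_DIR into DEST_DIR; the
# script filename is an assumption.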
| 153
| 0
|
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def A_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__A : Any = checkpoints.load_tax_checkpoint(__SCREAMING_SNAKE_CASE )
__A : Optional[Any] = flatten_dict(__SCREAMING_SNAKE_CASE )
return flax_params
def A_ ( __SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
__A : Any = {}
__A : str = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
__A : List[str] = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__A : Optional[Any] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__A : Optional[int] = new_key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__A : Optional[Any] = new_key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__A : List[Any] = re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , __SCREAMING_SNAKE_CASE )
__A : Tuple = new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__A : Union[str, Any] = re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , __SCREAMING_SNAKE_CASE )
__A : Optional[Any] = flax_dict[key]
__A : Optional[Any] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__A : Tuple = torch.from_numpy(converted_dict[key].T )
else:
__A : Dict = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def A_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False ) -> str:
"""simple docstring"""
__A : Any = get_flax_param(__SCREAMING_SNAKE_CASE )
if not use_large:
__A : List[Any] = PixaStructVisionConfig()
__A : Tuple = PixaStructTextConfig()
else:
__A : Any = PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
__A : Any = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
__A : Dict = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__SCREAMING_SNAKE_CASE )
__A : Tuple = PixaStructForConditionalGeneration(__SCREAMING_SNAKE_CASE )
__A : Optional[int] = rename_and_convert_flax_params(__SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
__A : Dict = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
__A : Union[str, Any] = PixaStructImageProcessor()
__A : Dict = PixaStructProcessor(image_processor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
if use_large:
__A : List[Any] = 4096
__A : Optional[Any] = True
# mkdir if needed
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
print("""Model saved in {}""".format(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
    args = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 706
|
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm for the maximum subarray sum."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("""-inf""")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 499
| 0
|
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption for every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'''Decryption using Key #{key}: {translated}''')
def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
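# Example run (illustrative): for the ciphertext "KHOOR" (HELLO shifted by 3),
# the loop prints one candidate per key and the key-3 line reads
#   Decryption using Key #3: HELLO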
| 1
|
def euclidean_distance_sqr(point1, point2) -> float:
    """Squared euclidean distance between two 2-D points."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Sort a list of points by the given coordinate."""
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float('inf')) -> float:
    """Brute force: closest squared distance among the first points_counts points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float('inf')) -> float:
    """Closest squared distance inside the strip; only 6 neighbours need checking."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts) -> float:
    """Divide and conquer over the x-sorted points."""
    # base case: fall back to brute force for tiny inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # keep only the points within closest_pair_dis of the dividing line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    """Distance (not squared) between the closest pair of points."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
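# Cross-check sketch (an illustrative addition): the O(n^2) brute force below
# should agree with the divide-and-conquer result; on the sample points used
# in the __main__ block both yield sqrt(2) for the pair (2, 3) and (3, 4).
def brute_force_closest_pair(points) -> float:
    return min(
        euclidean_distance_sqr(p, q)
        for i, p in enumerate(points)
        for q in points[i + 1 :]
    ) ** 0.5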
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
| 1
| 1
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCamelCase :
def __init__( self : Optional[int] , __snake_case : int , __snake_case : Tuple=13 , __snake_case : Any=7 , __snake_case : Union[str, Any]=True , __snake_case : List[str]=True , __snake_case : Optional[int]=False , __snake_case : List[Any]=True , __snake_case : str=99 , __snake_case : Optional[int]=32 , __snake_case : Any=5 , __snake_case : Tuple=4 , __snake_case : List[Any]=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : List[str]=0.1 , __snake_case : int=0.1 , __snake_case : Any=5_12 , __snake_case : Dict=16 , __snake_case : int=2 , __snake_case : Any=0.02 , __snake_case : Any=3 , __snake_case : str=4 , __snake_case : int=None , ):
'''simple docstring'''
_snake_case: Dict = parent
_snake_case: Optional[int] = batch_size
_snake_case: List[Any] = seq_length
_snake_case: Union[str, Any] = is_training
_snake_case: Optional[Any] = use_input_mask
_snake_case: Dict = use_token_type_ids
_snake_case: Any = use_labels
_snake_case: Optional[Any] = vocab_size
_snake_case: List[Any] = hidden_size
_snake_case: int = num_hidden_layers
_snake_case: List[str] = num_attention_heads
_snake_case: List[Any] = intermediate_size
_snake_case: Optional[Any] = hidden_act
_snake_case: str = hidden_dropout_prob
_snake_case: List[str] = attention_probs_dropout_prob
_snake_case: Dict = max_position_embeddings
_snake_case: Optional[Any] = type_vocab_size
_snake_case: List[Any] = type_sequence_label_size
_snake_case: List[str] = initializer_range
_snake_case: List[str] = num_labels
_snake_case: Tuple = num_choices
_snake_case: Dict = scope
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
_snake_case: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case: int = None
if self.use_input_mask:
_snake_case: List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case: List[Any] = None
if self.use_token_type_ids:
_snake_case: str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case: Optional[int] = None
_snake_case: Tuple = None
_snake_case: Union[str, Any] = None
if self.use_labels:
_snake_case: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case: Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case: Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : Any , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : int ):
'''simple docstring'''
_snake_case: Optional[Any] = LlamaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: Optional[Any] = model(__snake_case , attention_mask=__snake_case )
_snake_case: Tuple = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Dict , __snake_case : str , __snake_case : int , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Any , ):
'''simple docstring'''
_snake_case: str = True
_snake_case: int = LlamaModel(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: int = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , )
_snake_case: Optional[int] = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , )
_snake_case: List[str] = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , __snake_case : List[str] , __snake_case : Dict , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : Union[str, Any] , ):
'''simple docstring'''
_snake_case: Union[str, Any] = LlamaForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: str = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any , ):
'''simple docstring'''
_snake_case: Any = True
_snake_case: Optional[int] = True
_snake_case: List[str] = LlamaForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
# first forward pass
_snake_case: Dict = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , use_cache=__snake_case , )
_snake_case: Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_snake_case: Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case: str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_snake_case: Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
_snake_case: Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
_snake_case: Tuple = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , output_hidden_states=__snake_case , )['hidden_states'][0]
_snake_case: Dict = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )['hidden_states'][0]
# select random slice
_snake_case: List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_snake_case: str = output_from_no_past[:, -3:, random_slice_idx].detach()
_snake_case: Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = (LlamaForCausalLM,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: Optional[int] = LlamaModelTester(self )
_snake_case: Dict = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case: Union[str, Any] = type
self.model_tester.create_and_check_model(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case: int = 3
_snake_case: Optional[Any] = input_dict['input_ids']
_snake_case: Tuple = input_ids.ne(1 ).to(__snake_case )
_snake_case: Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case: Union[str, Any] = LlamaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: str = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case: Dict = 3
_snake_case: str = 'single_label_classification'
_snake_case: List[str] = input_dict['input_ids']
_snake_case: Optional[int] = input_ids.ne(1 ).to(__snake_case )
_snake_case: List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case: Optional[Any] = LlamaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: Optional[Any] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case: int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case: Any = 3
_snake_case: Optional[int] = 'multi_label_classification'
_snake_case: Tuple = input_dict['input_ids']
_snake_case: Optional[Any] = input_ids.ne(1 ).to(__snake_case )
_snake_case: List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_snake_case: Union[str, Any] = LlamaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: Optional[int] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , __snake_case : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case: int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case: Optional[int] = ids_tensor([1, 10] , config.vocab_size )
_snake_case: Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_snake_case: Tuple = LlamaModel(__snake_case )
original_model.to(__snake_case )
original_model.eval()
_snake_case: List[Any] = original_model(__snake_case ).last_hidden_state
_snake_case: List[str] = original_model(__snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_snake_case: Tuple = {'type': scaling_type, 'factor': 10.0}
_snake_case: List[Any] = LlamaModel(__snake_case )
scaled_model.to(__snake_case )
scaled_model.eval()
_snake_case: Dict = scaled_model(__snake_case ).last_hidden_state
_snake_case: str = scaled_model(__snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: List[str] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case: Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
_snake_case: Optional[int] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_snake_case: List[Any] = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , __snake_case , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case: Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __snake_case , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: Union[str, Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case: Any = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
_snake_case: int = model(torch.tensor(__snake_case ) )
# Expected mean on dim = -1
_snake_case: List[Any] = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , __snake_case , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case: Optional[Any] = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __snake_case , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
_snake_case: List[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case: Any = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
_snake_case: List[str] = model(torch.tensor(__snake_case ) )
# Expected mean on dim = -1
_snake_case: List[Any] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , __snake_case , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case: Optional[int] = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __snake_case , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: int = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case: Any = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
_snake_case: Dict = model(torch.tensor(__snake_case ) )
_snake_case: Optional[Any] = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __snake_case , atol=1e-2 , rtol=1e-2 )
# fmt: off
_snake_case: str = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __snake_case , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
_snake_case: Dict = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
_snake_case: Dict = 'Simply put, the theory of relativity states that '
_snake_case: List[str] = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
_snake_case: Optional[Any] = tokenizer.encode(__snake_case , return_tensors='pt' )
_snake_case: Optional[int] = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=__snake_case )
# greedy generation outputs
_snake_case: List[Any] = model.generate(__snake_case , max_new_tokens=64 , top_p=__snake_case , temperature=1 , do_sample=__snake_case )
_snake_case: Dict = tokenizer.decode(generated_ids[0] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
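# Minimal usage sketch (an illustration, not part of the test suite above):
# greedy decoding with a Llama checkpoint, assuming access to the gated
# meta-llama weights on the Hub.
#
#   from transformers import LlamaForCausalLM, LlamaTokenizer
#
#   tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-7b-hf')
#   model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf', device_map='auto')
#   inputs = tokenizer('Simply put, the theory of relativity states that ', return_tensors='pt')
#   output_ids = model.generate(inputs.input_ids, max_new_tokens=32, do_sample=False)
#   print(tokenizer.decode(output_ids[0], skip_special_tokens=True))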
| 273
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
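# Note (an illustrative aside): Google changes its result markup frequently,
# so the '.eZt8xd' selector above is brittle. A cheap sanity check before
# opening browser tabs is to print the extracted hrefs first, e.g.:
#
#   for link in links:
#       print(link.get('href'))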
| 273
| 1
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a sign change on [a, b] guarantees a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
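    # Worked check (an illustrative addition): the positive root of 10 - x*x
    # is sqrt(10) ~= 3.1623, and the loop above halts once the bracket is
    # narrower than 0.01, so the returned midpoint must sit within that
    # tolerance of the true root.
    import math

    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01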
| 323
|
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle exploiting the symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both triangle generators over a range of row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'{call:38} -- {timing:.4f} seconds')

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
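    # Equivalence spot-check (an illustrative addition): both generators
    # should agree row for row on a small input.
    expected = [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
    assert generate_pascal_triangle(5) == expected
    assert generate_pascal_triangle_optimized(5) == expected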
| 323
| 1
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F'''{group_key}.''', F'''{group_key}.group.''')

        if "res_path" in key:
            key = key.replace('res_path.', 'res_path.path.')

        if key.endswith('.w'):
            key = rreplace(key, '.w', '.weight', 1)
        if key.endswith('.b'):
            key = rreplace(key, '.b', '.bias', 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
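# Example invocation (paths are hypothetical, shown only to illustrate the
# CLI defined above):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path /path/to/dall_e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook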
| 565
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: values vl, weights wt, capacity w, n items."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
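    # Worked example (an illustrative addition): the classic instance with
    # values [60, 100, 120], weights [10, 20, 30] and capacity 50 is solved
    # by taking items 1 and 2 whole plus 2/3 of item 3, for a total of 240.
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0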
| 565
| 1
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
SPIECE_UNDERLINE = """▁"""
class lowerCAmelCase__ ( a_ ):
lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ['''input_ids''', '''attention_mask''']
def __init__( self , UpperCamelCase__ , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__=1_00 , UpperCamelCase__=None , UpperCamelCase__ = None , UpperCamelCase__=True , **UpperCamelCase__ , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
A__ = [f"""<extra_id_{i}>""" for i in range(lowercase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
A__ = len(set(filter(lambda UpperCamelCase__ : bool("extra_id" in str(lowercase_ ) ) , lowercase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
f"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
A__ = legacy
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
A__ = vocab_file
A__ = extra_ids
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
@staticmethod
def lowercase_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
A__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
return max_model_length
@property
def lowercase_ ( self ):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def lowercase_ ( self ):
'''simple docstring'''
A__ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase_ )) + [1]
return ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1]
def lowercase_ ( self ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(R"<extra_id_\d+>" , lowercase_ ) ) is not None , self.additional_special_tokens ) ) )
def lowercase_ ( self ):
'''simple docstring'''
return [self._convert_token_to_id(lowercase_ ) for token in self.get_sentinel_tokens()]
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if len(lowercase_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
'''simple docstring'''
A__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
'''simple docstring'''
A__ = self._add_eos_if_not_present(lowercase_ )
if token_ids_a is None:
return token_ids_a
else:
A__ = self._add_eos_if_not_present(lowercase_ )
return token_ids_a + token_ids_a
def __getstate__( self ):
'''simple docstring'''
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
if not self.legacy:
A__ = SPIECE_UNDERLINE + text.replace(lowercase_ , " " )
return super().tokenize(lowercase_ , **lowercase_ )
def lowercase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
if not self.legacy:
A__ = text.startswith(lowercase_ )
if is_first:
A__ = text[1:]
A__ = self.sp_model.encode(lowercase_ , out_type=lowercase_ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(lowercase_ ):
A__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if token.startswith("<extra_id_" ):
A__ = re.match(R"<extra_id_(\d+)>" , lowercase_ )
A__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase_ )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
A__ = self.sp_model.IdToPiece(lowercase_ )
else:
A__ = f"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = []
A__ = ""
A__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_ ) + token
A__ = True
A__ = []
else:
current_sub_tokens.append(lowercase_ )
A__ = False
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
'''simple docstring'''
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
A__ = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , "wb" ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
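# Usage sketch (an illustration, not part of the class above), assuming the
# standard upstream entry point for this tokenizer:
#
#   from transformers import T5Tokenizer
#
#   tokenizer = T5Tokenizer.from_pretrained('t5-small')
#   ids = tokenizer('translate English to German: Hello.').input_ids
#   print(tokenizer.convert_ids_to_tokens(ids))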
| 337
|
__version__ = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
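# The try/except blocks above all follow one pattern: probe for an optional
# dependency and fall back to dummy placeholder objects when it is missing.
# A minimal standalone sketch of the same idea (names here are illustrative,
# not from this package):
#
#   try:
#       import scipy  # optional extra
#       _scipy_available = True
#   except ImportError:
#       _scipy_available = False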
| 318
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """Exercise every operation of CircularLinkedList at least once."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
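    # Quick usage sketch (an illustrative addition): build a small ring and
    # render it via the __repr__ defined above.
    ring = CircularLinkedList()
    for value in (10, 20, 30):
        ring.insert_tail(value)
    assert repr(ring) == "10->20->30"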
| 704
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : int ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase :List[str] = mock.Mock()
UpperCamelCase :List[Any] = 500
UpperCamelCase :List[Any] = {}
UpperCamelCase :Tuple = HTTPError
UpperCamelCase :Union[str, Any] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase :Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=__lowerCamelCase ) as mock_head:
UpperCamelCase :List[str] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _A ( self : Union[str, Any] ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase :Union[str, Any] = mock.Mock()
UpperCamelCase :Any = 500
UpperCamelCase :Optional[Any] = {}
UpperCamelCase :Optional[int] = HTTPError
UpperCamelCase :List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase :Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=__lowerCamelCase ) as mock_head:
UpperCamelCase :List[str] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
        # This check ensures we did call the fake head request
mock_head.assert_called()
def _A ( self : Optional[Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase :List[Any] = tempfile.mktemp()
with open(__lowerCamelCase , """wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = AlbertTokenizer.from_pretrained(__lowerCamelCase )
finally:
os.remove(__lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" , """wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , __lowerCamelCase )
UpperCamelCase :Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def _A ( self : Dict ):
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase :Optional[int] = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case__ : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def _A ( cls : Optional[Any] ):
UpperCamelCase :Optional[int] = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def _A ( cls : Tuple ):
try:
delete_repo(token=cls._token , repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def _A ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase :List[str] = os.path.join(__lowerCamelCase , """vocab.txt""" )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCamelCase :Any = BertTokenizer(__lowerCamelCase )
tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token )
UpperCamelCase :Optional[int] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase , repo_id="""test-tokenizer""" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
UpperCamelCase :Optional[int] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _A ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase :List[Any] = os.path.join(__lowerCamelCase , """vocab.txt""" )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCamelCase :Optional[Any] = BertTokenizer(__lowerCamelCase )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token )
UpperCamelCase :Optional[int] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__lowerCamelCase , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
UpperCamelCase :int = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _A ( self : Tuple ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase :Optional[int] = os.path.join(__lowerCamelCase , """vocab.txt""" )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCamelCase :Tuple = CustomTokenizer(__lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new tokenizer is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase :Dict = os.path.join(__lowerCamelCase , """vocab.txt""" )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCamelCase :Dict = BertTokenizerFast.from_pretrained(__lowerCamelCase )
bert_tokenizer.save_pretrained(__lowerCamelCase )
UpperCamelCase :List[str] = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new tokenizer is from the CustomTokenizerFast class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" )
UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=__lowerCamelCase , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new tokenizer is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : int ):
UpperCamelCase :Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def _A ( self : List[str] ):
UpperCamelCase :List[Any] = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] )
def _A ( self : List[str] ):
UpperCamelCase :int = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] )
def _A ( self : List[Any] ):
UpperCamelCase :int = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def _A ( self : Tuple ):
UpperCamelCase :Dict = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def _A ( self : str ):
UpperCamelCase :Dict = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] )
def _A ( self : List[str] ):
UpperCamelCase :Tuple = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] )
def _A ( self : Tuple ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase :List[Any] = Trie()
UpperCamelCase :Dict = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(__lowerCamelCase , ["""AB""", """C"""] )
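# A minimal, hypothetical sketch of the trie behaviour exercised above: `add`
# stores tokens as nested dicts keyed by character (with "" marking a leaf)
# and `split` greedily cuts on the longest stored token. The real
# transformers Trie additionally handles overlapping matches and offsets.
class _MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # leaf marker

    def split(self, text):
        parts, start, i = [], 0, 0
        while i < len(text):
            node, j, end = self.data, i, None
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if "" in node:
                    end = j  # longest match ending here so far
            if end is None:
                i += 1
            else:
                if i > start:
                    parts.append(text[start:i])
                parts.append(text[i:end])
                start = i = end
        if start < len(text):
            parts.append(text[start:])
        return parts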
| 590
| 0
|
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="autoformer"
UpperCamelCase ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = "student_t" , UpperCamelCase_ = "nll" , UpperCamelCase_ = 1 , UpperCamelCase_ = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase_ = True , UpperCamelCase_ = 0 , UpperCamelCase_ = 0 , UpperCamelCase_ = 0 , UpperCamelCase_ = 0 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 64 , UpperCamelCase_ = 2 , UpperCamelCase_ = 2 , UpperCamelCase_ = 2 , UpperCamelCase_ = 2 , UpperCamelCase_ = 32 , UpperCamelCase_ = 32 , UpperCamelCase_ = "gelu" , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 1_00 , UpperCamelCase_ = 0.0_2 , UpperCamelCase_ = True , UpperCamelCase_=True , UpperCamelCase_ = 10 , UpperCamelCase_ = 25 , UpperCamelCase_ = 3 , **UpperCamelCase_ , ) -> List[Any]:
# time series specific configuration
__lowercase : int = prediction_length
__lowercase : List[str] = context_length if context_length is not None else prediction_length
__lowercase : str = distribution_output
__lowercase : Union[str, Any] = loss
__lowercase : List[Any] = input_size
__lowercase : Any = num_time_features
__lowercase : Optional[Any] = lags_sequence
__lowercase : Any = scaling
__lowercase : List[Any] = num_dynamic_real_features
__lowercase : Dict = num_static_real_features
__lowercase : List[Any] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__lowercase : List[str] = cardinality
else:
__lowercase : Optional[int] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(UpperCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__lowercase : int = embedding_dimension
else:
__lowercase : Any = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowercase : Tuple = num_parallel_samples
# Transformer architecture configuration
__lowercase : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
__lowercase : str = d_model
__lowercase : Tuple = encoder_attention_heads
__lowercase : Optional[int] = decoder_attention_heads
__lowercase : int = encoder_ffn_dim
__lowercase : Union[str, Any] = decoder_ffn_dim
__lowercase : Any = encoder_layers
__lowercase : Optional[int] = decoder_layers
__lowercase : str = dropout
__lowercase : str = attention_dropout
__lowercase : Optional[int] = activation_dropout
__lowercase : Tuple = encoder_layerdrop
__lowercase : Tuple = decoder_layerdrop
__lowercase : List[str] = activation_function
__lowercase : Tuple = init_std
__lowercase : Optional[Any] = use_cache
# Autoformer
__lowercase : Union[str, Any] = label_length
__lowercase : Optional[int] = moving_average
__lowercase : Dict = autocorrelation_factor
super().__init__(is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ )
@property
def _lowerCamelCase ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
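# Hedged worked example of the feature-size bookkeeping in the config above;
# the values are illustrative, and the formula mirrors `_number_of_features`
# plus the feature size computed in __init__.
input_size, lags_sequence = 1, [1, 2, 3, 4, 5, 6, 7]
embedding_dimension, num_time_features = [2], 4
num_dynamic_real_features = num_static_real_features = 0
number_of_features = (
    sum(embedding_dimension)
    + num_dynamic_real_features
    + num_time_features
    + num_static_real_features
    + input_size * 2  # the log1p(abs(loc)) and log(scale) features
)
feature_size = input_size * len(lags_sequence) + number_of_features
assert feature_size == 15  # 1 * 7 lags + 8 covariate features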
| 76
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = ["image_processor", "tokenizer"]
a_ = "FlavaImageProcessor"
a_ = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Tuple , __A : int=None , __A : Optional[Any]=None , **__A : Dict ):
snake_case__ : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __A , )
snake_case__ : Tuple = kwargs.pop("feature_extractor" )
snake_case__ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__A , __A )
snake_case__ : Optional[int] = self.image_processor
def __call__( self : Dict , __A : Optional[ImageInput] = None , __A : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __A : bool = True , __A : Union[bool, str, PaddingStrategy] = False , __A : Union[bool, str, TruncationStrategy] = False , __A : Optional[int] = None , __A : int = 0 , __A : Optional[int] = None , __A : Optional[bool] = None , __A : Optional[bool] = None , __A : Optional[bool] = None , __A : Optional[bool] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : Optional[Union[str, TensorType]] = None , **__A : Dict , ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
snake_case__ : Dict = self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
if images is not None:
snake_case__ : int = self.image_processor(
__A , return_image_mask=__A , return_codebook_pixels=__A , return_tensors=__A , **__A , )
if text is not None and images is not None:
encoding.update(__A )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _lowercase ( self : str , *__A : List[Any] , **__A : Dict ):
return self.tokenizer.batch_decode(*__A , **__A )
def _lowercase ( self : Optional[Any] , *__A : Optional[int] , **__A : List[Any] ):
return self.tokenizer.decode(*__A , **__A )
@property
def _lowercase ( self : Optional[Any] ):
snake_case__ : Any = self.tokenizer.model_input_names
snake_case__ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowercase ( self : Optional[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __A , )
return self.image_processor_class
@property
def _lowercase ( self : List[str] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __A , )
return self.image_processor
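# Hedged usage sketch for the processor class above; the checkpoint id
# ("facebook/flava-full") and the expected keys are assumptions based on the
# released FLAVA model, and the call needs network access, so it is kept
# behind a function.
def _flava_processor_demo():
    from PIL import Image
    from transformers import FlavaProcessor  # the released API, not this copy

    processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    image = Image.new("RGB", (224, 224))
    inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    return sorted(inputs.keys())  # e.g. attention_mask, input_ids, pixel_values, ...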
| 297
| 0
|
import mpmath # for roots of unity
import numpy as np
class __lowercase :
    def __init__(self, poly_a=None, poly_b=None):
        """Store both polynomials, pad to a power-of-two length, and compute the product."""
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root of unity used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        """Discrete fourier transform of polynomial A or B, computed bottom-up."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Butterfly passes: the number of columns halves each round
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self):
        """Pointwise-multiply the two DFTs, then invert the transform."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack to plain coefficients
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self):
        """Pretty-print A, B and their product."""
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
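# Hedged sanity check for the FFT multiplication class above (named
# `__lowercase` in this snippet): (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2.
def _fft_cross_check():
    product = __lowercase(poly_a=[1, 2], poly_b=[3, 4]).product
    assert [round(c.real) for c in product] == [3, 10, 8]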
| 710
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __lowercase :
def __init__( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=1_3 , __lowerCamelCase : Optional[Any]=3_0 , __lowerCamelCase : Any=2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : str=True , __lowerCamelCase : str=True , __lowerCamelCase : Union[str, Any]=3_2 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Union[str, Any]=3_7 , __lowerCamelCase : str="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=1_0 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Any=2 , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
UpperCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 2
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowercase ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int ) -> Any:
"""simple docstring"""
UpperCAmelCase = TFDeiTModel(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = TFDeiTForMaskedImageModeling(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFDeiTForMaskedImageModeling(__lowerCamelCase )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = TFDeiTForImageClassification(__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFDeiTForImageClassification(__lowerCamelCase )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __lowercase ( __snake_case , __snake_case , unittest.TestCase ):
UpperCamelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowercase ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase = TFDeiTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , tf.keras.layers.Dense ) )
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowercase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowercase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _lowercase ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=False ) -> int:
"""simple docstring"""
UpperCAmelCase = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFDeiTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) ->Tuple:
UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__lowerCamelCase , return_tensors="""tf""" )
# forward pass
UpperCAmelCase = model(**__lowerCamelCase )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
UpperCAmelCase = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 627
| 0
|
from ..utils import DummyObject, requires_backends
class lowerCamelCase( metaclass=__snake_case ):
'''simple docstring'''
__magic_name__ = ['onnx']
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['onnx'] )
@classmethod
def lowerCAmelCase__ ( cls , *snake_case_ , **snake_case_ ):
requires_backends(cls , ['onnx'] )
@classmethod
def lowerCAmelCase__ ( cls , *snake_case_ , **snake_case_ ):
requires_backends(cls , ['onnx'] )
| 27
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase_ )
class snake_case_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
self.check_model_type(__lowerCamelCase )
def UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : int=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Any=None , **__lowerCamelCase : Optional[int] ) -> Any:
'''simple docstring'''
__lowercase , __lowercase = {}, {}
if padding is not None:
__lowercase = padding
if truncation is not None:
__lowercase = truncation
if top_k is not None:
__lowercase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : str , __lowerCamelCase : Union["Image.Image", str] , __lowerCamelCase : str = None , **__lowerCamelCase : Dict ) -> Tuple:
'''simple docstring'''
if isinstance(__lowerCamelCase , (Image.Image, str) ) and isinstance(__lowerCamelCase , __lowerCamelCase ):
__lowercase = {'image': image, 'question': question}
else:
__lowercase = image
__lowercase = super().__call__(__lowerCamelCase , **__lowerCamelCase )
return results
def UpperCAmelCase ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=False ) -> Optional[int]:
'''simple docstring'''
__lowercase = load_image(inputs['image'] )
__lowercase = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=__lowerCamelCase , truncation=__lowerCamelCase )
__lowercase = self.image_processor(images=__lowerCamelCase , return_tensors=self.framework )
model_inputs.update(__lowerCamelCase )
return model_inputs
def UpperCAmelCase ( self : str , __lowerCamelCase : Tuple ) -> Any:
'''simple docstring'''
__lowercase = self.model(**__lowerCamelCase )
return model_outputs
def UpperCAmelCase ( self : int , __lowerCamelCase : Any , __lowerCamelCase : int=5 ) -> Tuple:
'''simple docstring'''
if top_k > self.model.config.num_labels:
__lowercase = self.model.config.num_labels
if self.framework == "pt":
__lowercase = model_outputs.logits.sigmoid()[0]
__lowercase , __lowercase = probs.topk(__lowerCamelCase )
else:
raise ValueError(F"Unsupported framework: {self.framework}" )
__lowercase = scores.tolist()
__lowercase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCamelCase , __lowerCamelCase )]
| 375
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working, simple example of how to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system; it builds on the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase = 16
UpperCamelCase = 32
def _A ( lowerCAmelCase_ : Accelerator , lowerCAmelCase_ : int = 16 ):
"""simple docstring"""
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCAmelCase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCAmelCase_ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase__ = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCAmelCase_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase__ = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase__ = 8
else:
lowerCAmelCase__ = None
return tokenizer.pad(
lowerCAmelCase_ , padding="longest" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
lowerCAmelCase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase = mocked_dataloaders # noqa: F811
def _A ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCAmelCase_ ) == "1":
lowerCAmelCase__ = 2
# Initialize accelerator
lowerCAmelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ = config["lr"]
lowerCAmelCase__ = int(config["num_epochs"] )
lowerCAmelCase__ = int(config["seed"] )
lowerCAmelCase__ = int(config["batch_size"] )
lowerCAmelCase__ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase__ = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase__ = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
lowerCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase__ = model(**lowerCAmelCase_ )
lowerCAmelCase__ = outputs.loss
lowerCAmelCase__ = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
lowerCAmelCase__ = 0
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCAmelCase_ )
lowerCAmelCase__ = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowerCAmelCase_ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
lowerCAmelCase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
lowerCAmelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCAmelCase_ )
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
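# As the "New Code" comments above note, the manual truncation of the last
# distributed eval batch can be replaced by `Accelerator.gather_for_metrics`.
# A hedged sketch of the eval loop in that style (names mirror the script):
def _eval_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics drops the samples duplicated to pad the last batch
        predictions, references = accelerator.gather_for_metrics(
            (predictions, batch["labels"])
        )
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()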
| 125
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 125
| 1
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
UpperCAmelCase_ : Optional[Any] = models.Sequential()
# Step 1 - Convolution
# Here 64, 64 are the height and width of the dataset images, and 3 is the number of RGB channels
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
UpperCAmelCase_ : Union[str, Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
UpperCAmelCase_ : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
UpperCAmelCase_ : int = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
UpperCAmelCase_ : Dict = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
UpperCAmelCase_ : Optional[Any] = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
UpperCAmelCase_ : List[Any] = tf.keras.preprocessing.image.img_to_array(test_image)
UpperCAmelCase_ : List[Any] = np.expand_dims(test_image, axis=0)
UpperCAmelCase_ : Dict = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
UpperCAmelCase_ : Union[str, Any] = '''Normal'''
if result[0][0] == 1:
UpperCAmelCase_ : Tuple = '''Abnormality detected'''
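# Note: `fit_generator` (used above) is deprecated in modern Keras, where
# `fit` accepts the same generators directly. Hedged sketch of the equivalent:
def _fit_modern(classifier, training_set, test_set):
    return classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )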
| 17
|
"""simple docstring"""
from collections import deque
class lowercase:
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
a__ = process_name # process name
a__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
a__ = arrival_time
a__ = burst_time # remaining burst time
a__ = 0 # total time of the process wait in ready queue
a__ = 0 # time from arrival time to completion time
class lowercase:
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
a__ = number_of_queues
# time slice of queues that round robin algorithm applied
a__ = time_slices
# unfinished process is in this ready_queue
a__ = queue
# current time
a__ = current_time
# finished process is in this sequence queue
a__ = deque()
def lowercase__ ( self ) -> list[str]:
"""simple docstring"""
a__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
a__ = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
a__ = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
a__ = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
return [q.burst_time for q in queue]
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> deque[Process]:
"""simple docstring"""
a__ = deque() # sequence deque of finished process
while len(__SCREAMING_SNAKE_CASE ) != 0:
a__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__SCREAMING_SNAKE_CASE )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
a__ = 0
# set the process's turnaround time because it is finished
a__ = self.current_time - cp.arrival_time
# set the completion time
a__ = self.current_time
# add the process to queue that has finished queue
finished.append(__SCREAMING_SNAKE_CASE )
self.finish_queue.extend(__SCREAMING_SNAKE_CASE ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> tuple[deque[Process], deque[Process]]:
"""simple docstring"""
a__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__SCREAMING_SNAKE_CASE ) ):
a__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__SCREAMING_SNAKE_CASE )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
a__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__SCREAMING_SNAKE_CASE )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
a__ = 0
# set the finish time
a__ = self.current_time
# update the process' turnaround time because it is finished
a__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__SCREAMING_SNAKE_CASE )
self.finish_queue.extend(__SCREAMING_SNAKE_CASE ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowercase__ ( self ) -> deque[Process]:
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
a__ , a__ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
a : List[str] = Process('P1', 0, 53)
a : Optional[int] = Process('P2', 0, 17)
a : Union[str, Any] = Process('P3', 0, 68)
a : Optional[int] = Process('P4', 0, 24)
a : Optional[int] = 3
a : Optional[Any] = [17, 25]
a : Union[str, Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
a : Union[str, Any] = Process('P1', 0, 53)
a : Optional[int] = Process('P2', 0, 17)
a : Optional[Any] = Process('P3', 0, 68)
a : str = Process('P4', 0, 24)
a : Optional[Any] = 3
a : Tuple = [17, 25]
a : int = deque([Pa, Pa, Pa, Pa])
a : Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
a : Optional[int] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 273
| 0
|
'''simple docstring'''
def heaps(arr):
    """Generate all permutations of `arr` with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
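# Quick sanity check for heaps() above: 3 distinct elements must yield
# 3! = 6 distinct permutations. Kept behind a function because the
# __main__ block above prompts for input.
def _check_heaps():
    perms = heaps([1, 2, 3])
    assert len(perms) == 6 and len(set(perms)) == 6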
| 708
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(UpperCAmelCase , """_dynamo""" ):
return False
return isinstance(UpperCAmelCase , torch._dynamo.eval_frame.OptimizedModule )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase = True ):
"""simple docstring"""
snake_case__ : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
snake_case__ : List[Any] = is_compiled_module(UpperCAmelCase )
if is_compiled:
snake_case__ : List[str] = model
snake_case__ : Optional[int] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(UpperCAmelCase , UpperCAmelCase ):
snake_case__ : List[Any] = model.module
if not keep_fpaa_wrapper:
snake_case__ : List[Any] = getattr(UpperCAmelCase , """forward""" )
snake_case__ : List[str] = model.__dict__.pop("""_original_forward""" , UpperCAmelCase )
if original_forward is not None:
while hasattr(UpperCAmelCase , """__wrapped__""" ):
snake_case__ : str = forward.__wrapped__
if forward == original_forward:
break
snake_case__ : Tuple = forward
if getattr(UpperCAmelCase , """_converted_to_transformer_engine""" , UpperCAmelCase ):
convert_model(UpperCAmelCase , to_transformer_engine=UpperCAmelCase )
if is_compiled:
snake_case__ : Dict = model
snake_case__ : int = compiled_model
return model
def lowerCAmelCase__ ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(UpperCAmelCase , UpperCAmelCase )
elif PartialState().local_process_index == 0:
torch.save(UpperCAmelCase , UpperCAmelCase )
@contextmanager
def lowerCAmelCase__ ( **UpperCAmelCase ):
"""simple docstring"""
for key, value in kwargs.items():
snake_case__ : List[str] = str(UpperCAmelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
if not hasattr(UpperCAmelCase , """__qualname__""" ) and not hasattr(UpperCAmelCase , """__name__""" ):
snake_case__ : int = getattr(UpperCAmelCase , """__class__""" , UpperCAmelCase )
if hasattr(UpperCAmelCase , """__qualname__""" ):
return obj.__qualname__
if hasattr(UpperCAmelCase , """__name__""" ):
return obj.__name__
return str(UpperCAmelCase )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
for key, value in source.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
snake_case__ : Any = destination.setdefault(UpperCAmelCase , {} )
merge_dicts(UpperCAmelCase , UpperCAmelCase )
else:
snake_case__ : Tuple = value
return destination
def lowerCAmelCase__ ( UpperCAmelCase = None ):
"""simple docstring"""
if port is None:
snake_case__ : int = 2_9500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("""localhost""", port) ) == 0
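# Hedged usage sketch for the @contextmanager helper above (named
# `patch_environment` in released accelerate): it sets upper-cased env vars
# for the duration of the block and removes them afterwards. The helper is
# passed in as an argument because the snippet's obfuscated name differs.
def _patch_environment_demo(patch_environment):
    with patch_environment(master_port="29501"):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ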
| 172
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase : List[Any] = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 405
|
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase : Tuple = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase : Optional[int] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCamelCase : List[Any] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
lowerCamelCase : str = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring link matches its name."""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/'):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Collect configs whose docstrings lack a valid checkpoint and raise if any."""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 405
| 1
|
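# A quick demonstration of the checkpoint regex used above: it pairs a markdown
# link label with its huggingface.co URL, and the script then checks that the
# two agree. Plain stdlib, runnable as-is:
import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

docstring = (
    "Instantiating a configuration with the defaults will yield a similar "
    "configuration to that of [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
)

for ckpt_name, ckpt_link in _re_checkpoint.findall(docstring):
    # the name matches the link iff the link is exactly https://huggingface.co/<name>
    print(ckpt_name, ckpt_link, ckpt_link == f"https://huggingface.co/{ckpt_name}")
# -> bert-base-uncased https://huggingface.co/bert-base-uncased True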
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        """simple docstring"""
        self.parent = parent

    def prepare_feat_extract_dict(self):
        """simple docstring"""
        return {}


def get_html_strings():
    html_string_1 = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'

    html_string_2 = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        """simple docstring"""
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        """simple docstring"""
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        """simple docstring"""
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 354
| 1
|
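# A short usage sketch for the feature extractor exercised above. It assumes
# `transformers` and `bs4` (BeautifulSoup) are installed; the extractor walks
# the HTML tree and returns text nodes together with their XPath expressions:
from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
html = "<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>"
encoding = feature_extractor(html)
print(encoding.nodes)   # [['My First Heading', 'My first paragraph.']]
print(encoding.xpaths)  # [['/html/body/h1', '/html/body/p']]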
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=50 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ) ->int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = use_labels
lowerCAmelCase = scope
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->Optional[int]:
lowerCAmelCase = BertGenerationEncoder(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->Union[str, Any]:
lowerCAmelCase = True
lowerCAmelCase = BertGenerationEncoder(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->Dict:
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = BertGenerationDecoder(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).eval()
# first forward pass
lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , ) ->Dict:
lowerCAmelCase = BertGenerationDecoder(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
UpperCAmelCase_ : Any = (BertGenerationDecoder,) if is_torch_available() else ()
UpperCAmelCase_ : Optional[int] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = BertGenerationEncoderTester(self )
lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = '''bert'''
self.model_tester.create_and_check_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
lowerCAmelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )[0]
lowerCAmelCase = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
lowerCAmelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )[0]
lowerCAmelCase = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 312
|
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 312
| 1
|
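# A hedged sketch of how the BertGeneration classes tested above are typically
# paired: an encoder plus a decoder (with cross-attention enabled) wrapped in an
# EncoderDecoderModel. This mirrors the transformers documentation example;
# weights download on first use:
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder, BertTokenizer, EncoderDecoderModel

tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
decoder = BertGenerationDecoder.from_pretrained(
    "bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102
)
bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)

input_ids = tokenizer("This is a long article to summarize", return_tensors="pt").input_ids
labels = tokenizer("This is a short summary", return_tensors="pt").input_ids
loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss
print(loss)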
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 700
|
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass


test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order so each prepend keeps the list sorted ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 188
| 0
|
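# A quick interactive check of merge_lists, using the classes defined above;
# each SortedLinkedList sorts its input on construction, and the merge simply
# concatenates and re-sorts:
merged = merge_lists(SortedLinkedList((3, 1)), SortedLinkedList((2, 0)))
print(merged)       # 0 -> 1 -> 2 -> 3
print(len(merged))  # 4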
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _lowercase ( __lowerCamelCase ):
_lowercase : Union[str, Any] = 'MCTCTFeatureExtractor'
_lowercase : Optional[int] = 'AutoTokenizer'
def __init__( self : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
A_ = self.feature_extractor
A_ = False
def __call__( self : Union[str, Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase__ , **lowerCamelCase__ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
A_ = kwargs.pop('''raw_speech''' )
else:
A_ = kwargs.pop('''audio''' , lowerCamelCase__ )
A_ = kwargs.pop('''sampling_rate''' , lowerCamelCase__ )
A_ = kwargs.pop('''text''' , lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
A_ = args[0]
A_ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
A_ = self.feature_extractor(lowerCamelCase__ , *lowerCamelCase__ , sampling_rate=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None:
A_ = self.tokenizer(lowerCamelCase__ , **lowerCamelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A_ = encodings['''input_ids''']
return inputs
def UpperCamelCase ( self : Union[str, Any] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Tuple ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCamelCase__ , **lowerCamelCase__ )
A_ = kwargs.pop('''input_features''' , lowerCamelCase__ )
A_ = kwargs.pop('''labels''' , lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
A_ = args[0]
A_ = args[1:]
if input_features is not None:
A_ = self.feature_extractor.pad(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
if labels is not None:
A_ = self.tokenizer.pad(lowerCamelCase__ , **lowerCamelCase__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A_ = labels['''input_ids''']
return input_features
def UpperCamelCase ( self : Any , *lowerCamelCase__ : Any , **lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@contextmanager
def UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
A_ = True
A_ = self.tokenizer
yield
A_ = self.feature_extractor
A_ = False
| 203
|
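# A hedged usage sketch for the processor pattern above, assuming it is
# transformers' MCTCTProcessor (names were mangled in this dump); the checkpoint
# id is the published M-CTC-T model and the audio here is synthetic noise:
import numpy as np
from transformers import MCTCTProcessor

processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
raw_audio = np.random.randn(16_000).astype(np.float32)  # ~1 s of fake 16 kHz audio

inputs = processor(audio=raw_audio, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="a reference transcription", return_tensors="pt").input_ids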
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 203
| 1
|
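# A small demonstration of the path filter built above: the top-level directories
# passed on the command line are OR-ed into one anchored regex that keeps only
# .py files under those directories. Stdlib only, runnable as-is:
import re

joined_dirs = "|".join(["utils", "src", "tests"])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

candidates = ["src/transformers/modeling_utils.py", "docs/index.md", "tests/test_modeling_bert.py"]
print([x for x in candidates if regex.match(x)])
# -> ['src/transformers/modeling_utils.py', 'tests/test_modeling_bert.py']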
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class A (tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : int , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int = None , __lowerCAmelCase : int = None ) -> Tuple:
"""simple docstring"""
super().__init__()
A__ = pad_token_id
A__ = max_length
A__ = vocab
A__ = merges
A__ = BytePairTokenizer(__lowerCAmelCase , __lowerCAmelCase , sequence_length=__lowerCAmelCase )
@classmethod
def a_ ( cls : Optional[Any] , __lowerCAmelCase : GPTaTokenizer , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
A__ = [""" """.join(__lowerCAmelCase ) for m in tokenizer.bpe_ranks.keys()]
A__ = tokenizer.get_vocab()
return cls(__lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def a_ ( cls : Dict , __lowerCAmelCase : Union[str, os.PathLike] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
A__ = GPTaTokenizer.from_pretrained(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase )
return cls.from_tokenizer(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def a_ ( cls : Dict , __lowerCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
return cls(**__lowerCAmelCase )
def a_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a_ ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int = None ) -> Any:
"""simple docstring"""
A__ = self.tf_tokenizer(__lowerCAmelCase )
A__ = tf.ones_like(__lowerCAmelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
A__ = max_length if max_length is not None else self.max_length
if max_length is not None:
A__ , A__ = pad_model_inputs(
__lowerCAmelCase , max_seq_length=__lowerCAmelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 713
|
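# A hedged usage sketch for the in-graph tokenizer above, assuming it is
# transformers' TFGPT2Tokenizer (class and argument names were mangled in this
# dump). Because tokenization runs inside the TF graph, it can be exported as
# part of a SavedModel; requires tensorflow, keras-nlp and tensorflow-text:
import tensorflow as tf
from transformers import TFGPT2Tokenizer

tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
batch = tokenizer(tf.constant(["hello world", "a second, longer sentence"]))
print(batch["input_ids"])       # token ids, one row per input string
print(batch["attention_mask"])  # ones matching input_ids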
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A : Dict = datasets.logging.get_logger(__name__)
A : Optional[Any] = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
A : int = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
A : Union[str, Any] = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def __lowerCamelCase ( __a :Dict , __a :int , __a :int=False , __a :Optional[Any]=False , __a :int=True , __a :Optional[int]=False , __a :Dict="dummy_doc" ) -> Any:
"""simple docstring"""
A__ = {doc: key_lines}
A__ = {doc: sys_lines}
A__ = {}
A__ = 0
A__ = 0
A__ = 0
A__ = 0
A__ = 0
A__ = 0
A__ , A__ = reader.get_doc_mentions(__a , key_doc_lines[doc] , __a )
key_singletons_num += singletons_num
if NP_only or min_span:
A__ = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a )
A__ , A__ = reader.get_doc_mentions(__a , sys_doc_lines[doc] , __a )
sys_singletons_num += singletons_num
if NP_only or min_span:
A__ = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a )
if remove_nested:
A__ , A__ = reader.remove_nested_coref_mentions(__a , __a )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
A__ , A__ = reader.remove_nested_coref_mentions(__a , __a )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
A__ = reader.get_mention_assignments(__a , __a )
A__ = reader.get_mention_assignments(__a , __a )
A__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
"""files, respectively""" )
return doc_coref_infos
def __lowerCamelCase ( __a :Any , __a :Union[str, Any] , __a :List[str] , __a :Dict , __a :str , __a :Tuple , __a :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
A__ = get_coref_infos(__a , __a , __a , __a , __a , __a )
A__ = {}
A__ = 0
A__ = 0
for name, metric in metrics:
A__ , A__ , A__ = evaluator.evaluate_documents(__a , __a , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(1_0 ) , F'Recall: {recall * 1_0_0:.2f}' , F' Precision: {precision * 1_0_0:.2f}' , F' F1: {fa * 1_0_0:.2f}' , )
if conll_subparts_num == 3:
A__ = (conll / 3) * 1_0_0
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def __lowerCamelCase ( __a :int ) -> List[Any]:
"""simple docstring"""
A__ = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
A__ = line.split()[5]
if not parse_col == "-":
A__ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A (datasets.Metric ):
'''simple docstring'''
def a_ ( self : int ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def a_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : int=False ) -> Optional[int]:
"""simple docstring"""
A__ = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
A__ = util.check_gold_parse_annotation(__lowerCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
A__ = evaluate(
key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , )
return score
| 247
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 14
|
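# A hedged usage sketch for the tool above via the transformers agents API.
# It assumes `transformers`, `torch` and `datasets` are installed, and that
# "text-to-speech" is how this tool is registered in the agents tool registry
# (an assumption; the class itself names the tool "text_reader"):
from transformers import load_tool

tts = load_tool("text-to-speech")
speech = tts("Hello world")  # a 1-D waveform tensor, 16 kHz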
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
__a = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 374
| 0
|
"""simple docstring"""
import json
import sys
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as f:
UpperCAmelCase = json.load(lowerCAmelCase )
UpperCAmelCase = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(lowerCAmelCase ):
UpperCAmelCase = results[benchmark_name]
UpperCAmelCase = benchmark_name.split("""/""" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
UpperCAmelCase = """| metric |"""
UpperCAmelCase = """|--------|"""
UpperCAmelCase = """| new / old (diff) |"""
for metric_name in sorted(lowerCAmelCase ):
UpperCAmelCase = benchmark_res[metric_name]
UpperCAmelCase = metric_vals["""new"""]
UpperCAmelCase = metric_vals.get("""old""" , lowerCAmelCase )
UpperCAmelCase = metric_vals.get("""diff""" , lowerCAmelCase )
UpperCAmelCase = F''' {new_val:f}''' if isinstance(lowerCAmelCase , (int, float) ) else """None"""
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(lowerCAmelCase , (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(lowerCAmelCase , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(lowerCAmelCase ) )
if __name__ == "__main__":
lowerCAmelCase_ : Optional[Any] = sys.argv[1]
lowerCAmelCase_ : List[Any] = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 378
|
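# A quick end-to-end check of the format_json_to_md function defined above,
# using a tiny synthetic results file (the paths and metric names here are
# illustrative, not from a real benchmark run):
import json
import tempfile

results = {"benchmarks/inference": {"latency_ms": {"new": 12.5, "old": 14.0, "diff": -1.5}}}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f_in:
    json.dump(results, f_in)

out_path = f_in.name + ".md"
format_json_to_md(f_in.name, out_path)
print(open(out_path, encoding="utf-8").read())
# prints a collapsible markdown section with one table row: 12.500000 / 14.000000 (-1.500000)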
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase_ ( a_ ):
_A : List[str] = ['pixel_values']
def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = 0.9 , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = True , snake_case__ = None , snake_case__ = 1 / 2_55 , snake_case__ = True , snake_case__ = True , snake_case__ = None , snake_case__ = None , **snake_case__ , ) -> None:
"""simple docstring"""
super().__init__(**snake_case__ )
UpperCAmelCase = size if size is not None else {"""shortest_edge""": 2_24}
UpperCAmelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
UpperCAmelCase = get_size_dict(snake_case__ , param_name="""crop_size""" )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = crop_pct
UpperCAmelCase = resample
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = None , **snake_case__ , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
UpperCAmelCase = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
UpperCAmelCase = int(size["""height"""] / crop_pct )
else:
UpperCAmelCase = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(snake_case__ ) )
UpperCAmelCase = get_resize_output_image_size(snake_case__ , size=snake_case__ , default_to_square=snake_case__ )
else:
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(snake_case__ , size=size["""shortest_edge"""] , default_to_square=snake_case__ )
elif "height" in size and "width" in size:
UpperCAmelCase = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(snake_case__ ) )
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(snake_case__ , size=(size["""height"""], size["""width"""]) , data_format=snake_case__ , **snake_case__ )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ) -> Tuple:
"""simple docstring"""
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ) -> np.ndarray:
"""simple docstring"""
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ) -> PIL.Image.Image:
"""simple docstring"""
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(snake_case__ , param_name="""crop_size""" )
UpperCAmelCase = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(image=snake_case__ , size=snake_case__ , crop_pct=snake_case__ , resample=snake_case__ ) for image in images]
if do_center_crop:
UpperCAmelCase = [self.center_crop(image=snake_case__ , size=snake_case__ ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
UpperCAmelCase = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
UpperCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
| 378
| 1
|
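# The crop_pct logic in the image processor above implements the standard
# "resize then center-crop" evaluation transform: the resize target is the final
# size divided by crop_pct, so the crop keeps roughly the central crop_pct
# fraction of the resized image. A quick arithmetic check:
size, crop_pct = 224, 0.9
resize_target = int(size / crop_pct)
print(resize_target)  # 248: resize shortest edge to 248, then center-crop 224x224
print(size / resize_target)  # ~0.903, i.e. roughly the requested crop fraction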
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 17
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ):
for pegasus_name, hf_name in PATTERNS:
lowercase : int = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
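A quick, self-contained illustration of the pattern table above: rename_state_dict_key just applies the left-to-right substitutions in order, so a TF-style key collapses to its BART-style equivalent. The sample key below is hypothetical and only for demonstration.

# Hypothetical TF-style key, shown only to illustrate the substitution order.
sample_key = "model/decoder/memory_attention/output_proj/kernel"
print(rename_state_dict_key(sample_key))
# "memory_attention" -> "encoder_attn", "/" -> ".", "output_proj" -> "out_proj",
# "kernel" -> "weight", so this prints:
# model.decoder.encoder_attn.out_proj.weight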
| 583
| 0
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
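A usage sketch for the processor above; the checkpoint id and the 48 kHz sampling rate are assumptions for illustration, not requirements of the class:

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")  # assumed checkpoint
texts = ["a dog barking", "rain on a tin roof"]
audio = np.random.randn(48_000).astype(np.float32)  # 1s of fake audio at 48 kHz (assumed rate)

# text + audio: the tokenizer output is merged with the extractor's input_features
batch = processor(text=texts, audios=audio, sampling_rate=48_000, return_tensors="pt")
print(sorted(batch.keys()))  # e.g. ['attention_mask', 'input_features', 'input_ids']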
| 701
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
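The regex-driven renumbering in replace_keys is easiest to see on a toy state dict; the keys below are hypothetical stand-ins for the real checkpoint keys:

import re

pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
# Hypothetical keys, just to show how layers.{0,1,2} are remapped.
for key in [
    "mask_decoder.output_hypernetworks_mlps.3.layers.0.weight",
    "mask_decoder.output_hypernetworks_mlps.3.layers.1.weight",
    "mask_decoder.output_hypernetworks_mlps.3.layers.2.weight",
]:
    layer_nb = int(re.match(pattern, key).group(2))
    if layer_nb == 0:
        key = key.replace("layers.0", "proj_in")
    elif layer_nb == 1:
        key = key.replace("layers.1", "layers.0")
    elif layer_nb == 2:
        key = key.replace("layers.2", "proj_out")
    print(key)
# mask_decoder.output_hypernetworks_mlps.3.proj_in.weight
# mask_decoder.output_hypernetworks_mlps.3.layers.0.weight
# mask_decoder.output_hypernetworks_mlps.3.proj_out.weight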
| 142
| 0
|
'''simple docstring'''
class Graph:  # Public class to implement a graph
    def __init__(self, row, col, graph):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited):
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited):
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
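A small check of the class above on a classic example grid; with 8-directional connectivity the diagonal cells merge into the same island, so the count can be verified by eye:

graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = Graph(5, 5, graph)
print(g.count_islands())  # 5 islands under 8-directional connectivity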
| 8
|
def abbr(a: str, b: str) -> bool:
    """
    Returns True when string `a` can be turned into string `b` by capitalizing
    some of its lowercase letters and deleting all remaining lowercase letters.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
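The DP table reads: dp[i][j] is True when the first i characters of `a` can be transformed into the first j characters of `b`. A worked check of both directions:

# "daBcd" -> "ABC": capitalize 'a' and 'c', delete the lowercase 'd's.
print(abbr("daBcd", "ABC"))  # True
# "dBcd" cannot produce "ABC": there is no 'a'/'A' to supply the leading A.
print(abbr("dBcd", "ABC"))   # False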
| 534
| 0
|
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Returns the count of all positive integers that are both an n-digit number
    and an nth power, for bases below max_base and powers below max_power.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
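The condition counts n-digit integers that are also an nth power; a quick manual check plus the known total:

# 9**3 = 729 has 3 digits, so it counts for power 3,
# while 2**4 = 16 has only 2 digits and does not count for power 4.
assert len(str(9**3)) == 3
# Project Euler 63's published answer for the default bounds is 49.
assert solution() == 49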
| 64
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
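From the user side, the builder above is what runs under load_dataset("json", ...); a short sketch, with placeholder file paths:

from datasets import load_dataset

# JSON Lines: one object per line, read in `chunksize` blocks by the builder.
ds = load_dataset("json", data_files="data/train.jsonl")  # placeholder path

# Single JSON document whose records live under one key -> pass `field`,
# which routes through the json.load() branch above.
ds = load_dataset("json", data_files="data/dump.json", field="records")  # placeholder path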
| 64
| 1
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 3
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
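The framework-agnostic helpers being tested dispatch on input type; a small demo with plain numpy, the only hard dependency here (the imports mirror the test file's own):

import numpy as np
from transformers.utils import expand_dims, flatten_dict, reshape, squeeze, transpose

x = np.random.randn(1, 3, 4)
print(transpose(x).shape)            # (4, 3, 1)
print(reshape(x, (4, 3)).shape)      # (4, 3)
print(squeeze(x).shape)              # (3, 4)
print(expand_dims(x, axis=0).shape)  # (1, 1, 3, 4)
print(flatten_dict({"a": {"b": 1}})) # {'a.b': 1}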
| 576
| 0
|
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector."""
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    # kernels
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
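A tiny fit/predict run of the classifier above on a linearly separable toy set; labels must be plus or minus one for the dual constraints to make sense, and the predictions below are the expected outcome of the optimization, not a guarantee:

import numpy as np

observations = [
    np.array([1.0, 1.0]),
    np.array([2.0, 2.0]),
    np.array([-1.0, -1.0]),
    np.array([-2.0, -2.0]),
]
classes = np.array([1, 1, -1, -1])

svc = SVC(kernel="linear")
svc.fit(observations, classes)
print(svc.predict(np.array([3.0, 3.0])))    # expected: 1
print(svc.predict(np.array([-3.0, -3.0])))  # expected: -1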
| 705
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
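A usage sketch for the processor's ASR direction (audio in, tokenized transcription as labels); the checkpoint id is an assumption for illustration:

import numpy as np
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")  # assumed checkpoint
speech = np.random.randn(16_000).astype(np.float32)  # 1s of fake 16 kHz audio

inputs = processor(audio=speech, text_target="hello world", sampling_rate=16_000, return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_values', 'labels', ...]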
| 401
| 0
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 302
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on dev set to compare to benchmarks without having to submit to GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )

        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )

        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 141
| 0
|
'''simple docstring'''
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Count the ordered combinations of items in `array` that sum to `target`,
    using plain recursion.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Same count, computed with memoized recursion over a dp array.
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """
    Same count, computed bottom-up.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
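All three implementations count ordered compositions, so they must agree; for the module's own example the count can be checked by hand:

# Compositions of 5 from {1, 2, 5}: c(t) = c(t-1) + c(t-2) + c(t-5)
# c(0)=1, c(1)=1, c(2)=2, c(3)=3, c(4)=5, c(5)=5+3+1=9
assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9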
| 266
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 266
| 1
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 642
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 518
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
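# A minimal usage sketch (added for illustration, not part of the original file):
# a valid `rope_scaling` dict carries exactly the two fields validated above, e.g.
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#
# whereas {"type": "cubic", "factor": 2.0} or {"type": "linear", "factor": 0.5}
# would trigger the ValueErrors raised by `_rope_scaling_validation`.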
| 717
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "The total number of n-best predictions to generate."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 109
| 0
|
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
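# How the recurrence works (explanatory note added to this dump, not part of the
# original solution): ways_number[n] starts at 1 (the row of all unit squares)
# and, for each coloured tile length 2..4 placed as the *first* tile at offset
# tile_start, adds the number of ways to fill the rest of the row. Counting by
# the first tile's position avoids double counting. For example, solution(5) == 15.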
if __name__ == "__main__":
print(f'''{solution() = }''')
| 639
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
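# Note (added): each merge rule above is in the fairseq-style BPE format that
# FSMTTokenizer reads — a symbol pair followed by a trailing number (e.g.
# "l o 123" merges "l" + "o"; treating the number as a merge frequency is an
# assumption here). The final empty string just terminates the file with a newline.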
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 639
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            "`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            "`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 403
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser
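# Usage sketch (added note; the argument values below are illustrative): this
# module backs the `accelerate config` CLI, so parsing e.g.
#     get_config_parser().parse_args(["default", "--config_file", "cfg.yaml"])
# selects the "default" subcommand, whose handler is dispatched through the
# parsed `func` attribute in `main()` below.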
def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 403
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 121
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
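# Example of what `rename_keys` does (illustrative, derived from the mapping
# above): an original Whisper key such as
#     "decoder.blocks.0.mlp.0.weight"
# is rewritten to the Transformers name
#     "decoder.layers.0.fc1.weight"
# because "blocks" -> "layers" and "mlp.0" -> "fc1" both match as substrings.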
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
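# Note (added): `make_linear_from_emb` implements output-embedding weight tying —
# the returned bias-free nn.Linear points at the same weight data as the token
# embedding, so the LM head maps hidden states back to vocabulary logits without
# introducing new parameters.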
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: defaulting the download root to the current directory is an
    # assumption made here; the call site below passes only the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],  # head count, not hidden size (was "n_text_state")
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 121
| 1
|
"""simple docstring"""
def prime_sieve_eratosthenes(num: int):
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
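# Example (illustrative): prime_sieve_eratosthenes(30) returns
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]. The sieve runs in O(n log log n):
# for each surviving prime p it only crosses out multiples starting at p*p,
# since smaller multiples were already removed by smaller primes.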
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 393
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
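# The closed forms used above (explanatory note): sum(1..n) = n(n+1)/2, so its
# square is (n(n+1)/2)^2 — which also equals the sum of the first n cubes,
# hence the variable name — and the sum of squares is n(n+1)(2n+1)/6.
# Worked example: n=10 gives 55^2 - 385 = 3025 - 385 = 2640.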
if __name__ == "__main__":
print(f'{solution() = }')
| 393
| 1
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    config_parameters_to_change = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
    key_parameters_to_change = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 6
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 344
| 0
|
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
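# Example (illustrative): normalization([2.0, 4.0, 6.0]) rescales each value to
# the [0, 1] range via (x - min) / (max - min), returning [0.0, 0.5, 1.0].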
def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
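# Example (illustrative): standardization([2.0, 4.0, 6.0]) centres the data on
# the mean (4.0) and divides by the sample standard deviation (2.0 here, since
# statistics.stdev uses the n-1 form), returning [-1.0, 0.0, 1.0].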
| 175
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case , keep_in_memory=snake_case ).read()
_check_json_dataset(snake_case , snake_case )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> List[Any]:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case , split=snake_case ).read()
_check_json_dataset(snake_case , snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Any:
if issubclass(snake_case , snake_case ):
_UpperCAmelCase = jsonl_path
elif issubclass(snake_case , snake_case ):
_UpperCAmelCase = [jsonl_path]
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case ).read()
_check_json_dataset(snake_case , snake_case )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case=("train",) ) -> str:
assert isinstance(snake_case , snake_case )
for split in splits:
_UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> str:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=snake_case , keep_in_memory=snake_case ).read()
_check_json_datasetdict(snake_case , snake_case )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Optional[Any]:
if split:
_UpperCAmelCase = {split: jsonl_path}
else:
_UpperCAmelCase = """train"""
_UpperCAmelCase = {"""train""": jsonl_path, """test""": jsonl_path}
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case ).read()
_check_json_datasetdict(snake_case , snake_case , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class _A :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json_function(_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert isinstance(exported_content[0] , _SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , orient=_SCREAMING_SNAKE_CASE ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json(_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_SCREAMING_SNAKE_CASE , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_SCREAMING_SNAKE_CASE ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json_function(_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert isinstance(exported_content[0] , _SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , orient=_SCREAMING_SNAKE_CASE , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json(_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_SCREAMING_SNAKE_CASE , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_SCREAMING_SNAKE_CASE ) == 10
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
with pytest.raises(_SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = tmp_path_factory.mktemp("""data""" ) / F"test.json.{extension}"
_UpperCAmelCase = str(shared_datadir / F"test_file.json.{extension}" )
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compression=_SCREAMING_SNAKE_CASE ).write()
with fsspec.open(_SCREAMING_SNAKE_CASE , """rb""" , compression="""infer""" ) as f:
_UpperCAmelCase = f.read()
with fsspec.open(_SCREAMING_SNAKE_CASE , """rb""" , compression="""infer""" ) as f:
_UpperCAmelCase = f.read()
assert exported_content == original_content
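# A minimal, self-contained sketch of the JSON round trip these tests exercise.
# It assumes the `datasets` library; `Dataset.from_dict` and `Dataset.to_json`
# are real APIs, while the column names below are illustrative.
import io
import json

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
with io.BytesIO() as buf:
    ds.to_json(buf, lines=True)  # one JSON object per line (JSON Lines)
    buf.seek(0)
    rows = [json.loads(line) for line in buf]
assert rows[0]["col_1"] == "a"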
| 175
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
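# The _LazyModule pattern above defers heavy imports until an attribute is first
# accessed. A rough standalone equivalent, assuming PEP 562 module-level
# __getattr__; the submodule map below is illustrative, not transformers' API.
import importlib

_SUBMODULES = {
    "MgpstrConfig": "configuration_mgp_str",
    "MgpstrProcessor": "processing_mgp_str",
    "MgpstrTokenizer": "tokenization_mgp_str",
}

def __getattr__(name):
    if name in _SUBMODULES:
        module = importlib.import_module(f".{_SUBMODULES[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")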
| 408
|
from manim import *
class _a ( A__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase =Rectangle(height=0.25 , width=0.25 )
_UpperCAmelCase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_UpperCAmelCase =[mem.copy() for i in range(6 )]
_UpperCAmelCase =[mem.copy() for i in range(6 )]
_UpperCAmelCase =VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCAmelCase =VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCAmelCase =VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 )
_UpperCAmelCase =Text("CPU" , font_size=24 )
_UpperCAmelCase =Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_snake_case )
_UpperCAmelCase =[mem.copy() for i in range(4 )]
_UpperCAmelCase =VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCAmelCase =Text("GPU" , font_size=24 )
_UpperCAmelCase =Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
gpu.move_to([-1, -1, 0] )
self.add(_snake_case )
_UpperCAmelCase =[mem.copy() for i in range(6 )]
_UpperCAmelCase =VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCAmelCase =Text("Model" , font_size=24 )
_UpperCAmelCase =Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
model.move_to([3, -1.0, 0] )
self.add(_snake_case )
_UpperCAmelCase =[]
_UpperCAmelCase =[]
_UpperCAmelCase =[]
for i, rect in enumerate(_snake_case ):
rect.set_stroke(_snake_case )
_UpperCAmelCase =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_snake_case , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_snake_case , buff=0.0 )
self.add(_snake_case )
model_cpu_arr.append(_snake_case )
self.add(*_snake_case , *_snake_case , *_snake_case )
_UpperCAmelCase =[mem.copy() for i in range(6 )]
_UpperCAmelCase =VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCAmelCase =Text("Loaded Checkpoint" , font_size=24 )
_UpperCAmelCase =Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
checkpoint.move_to([3, 0.5, 0] )
self.add(_snake_case )
_UpperCAmelCase =[]
_UpperCAmelCase =[]
for i, rect in enumerate(_snake_case ):
_UpperCAmelCase =fill.copy().set_fill(_snake_case , opacity=0.7 )
target.move_to(_snake_case )
ckpt_arr.append(_snake_case )
_UpperCAmelCase =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_snake_case )
self.add(*_snake_case , *_snake_case )
_UpperCAmelCase =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase =MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_snake_case , _snake_case )
_UpperCAmelCase =MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_snake_case )
_UpperCAmelCase =MarkupText(
F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_UpperCAmelCase =[meta_mem.copy() for i in range(6 )]
_UpperCAmelCase =[meta_mem.copy() for i in range(6 )]
_UpperCAmelCase =VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCAmelCase =VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
_UpperCAmelCase =VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 )
_UpperCAmelCase =Text("Disk" , font_size=24 )
_UpperCAmelCase =Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_snake_case , run_time=3 ) , Write(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) )
_UpperCAmelCase =[]
for i, rect in enumerate(_snake_case ):
_UpperCAmelCase =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_snake_case , run_time=1.5 ) )
self.play(*_snake_case )
self.play(FadeOut(_snake_case ) )
_UpperCAmelCase =MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_snake_case , run_time=3 ) )
self.play(
FadeOut(_snake_case , _snake_case , *_snake_case , *_snake_case ) , )
self.wait()
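# A minimal runnable Manim scene showing the generate_target / MoveToTarget
# pattern the animation above relies on (assumes the community edition `manim`
# package; the scene name is illustrative).
from manim import RIGHT, MoveToTarget, Scene, Square

class MoveDemo(Scene):
    def construct(self):
        block = Square(side_length=0.5)
        block.generate_target()          # snapshot a copy to edit as the goal state
        block.target.shift(RIGHT * 3)    # place the goal three units to the right
        self.play(MoveToTarget(block))   # animate the interpolation to the target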
| 408
| 1
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
def __init__( self ,A__ ,A__=1_3 ,A__=7 ,A__=True ,A__=True ,A__=True ,A__=True ,A__=9_9 ,A__=3_2 ,A__=5 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=1_6 ,A__=2 ,A__=0.02 ,A__=4 ,):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_attention_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_choices
    def prepare_config_and_inputs( self):
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
lowercase = None
if self.use_attention_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length])
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
lowercase = RoFormerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A__ ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowercase ( FlaxModelTesterMixin , unittest.TestCase ):
lowercase_ : Tuple =True
lowercase_ : Union[str, Any] =(
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A__ ( self):
lowercase = FlaxRoFormerModelTester(self)
@slow
def A__ ( self):
for model_class_name in self.all_model_classes:
lowercase = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' ,from_pt=A__)
lowercase = model(np.ones((1, 1)))
self.assertIsNotNone(A__)
@require_flax
class lowercase ( unittest.TestCase ):
@slow
def A__ ( self):
lowercase = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''')
lowercase = jnp.array([[0, 1, 2, 3, 4, 5]])
lowercase = model(A__)[0]
lowercase = 5_0_0_0_0
lowercase = (1, 6, vocab_size)
self.assertEqual(output.shape ,A__)
lowercase = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
self.assertTrue(jnp.allclose(output[:, :3, :3] ,A__ ,atol=1E-4))
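# The `ids_tensor` helper above builds random token ids for tests; a rough NumPy
# equivalent (a sketch, not the transformers helper itself).
import numpy as np

def make_ids(shape, vocab_size, seed=0):
    rng = np.random.default_rng(seed)
    return rng.integers(low=0, high=vocab_size, size=shape, dtype=np.int64)

input_ids = make_ids((2, 7), vocab_size=99)  # batch of 2 sequences of length 7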
| 633
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
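# The same deprecation-shim idea in miniature: keep the old import path working
# and warn at import time, pointing at the new location. The names here are
# illustrative stand-ins; `warnings.warn` is the standard-library call used above.
import warnings

from collections import OrderedDict as LegacyDict  # stand-in for a moved class

warnings.warn(
    "`legacy_module` is outdated. Please import `LegacyDict` from its new home instead.",
    FutureWarning,
)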
| 633
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
    def _get_uniform_logits( self , batch_size , length ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def snake_case__ ( self ):
_lowerCamelCase = None
_lowerCamelCase = 2_0
_lowerCamelCase = self._get_uniform_logits(batch_size=2 , length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
_lowerCamelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_lowerCamelCase = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_lowerCamelCase = jax.nn.softmax(_lowerCAmelCase , axis=-1 )
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
_lowerCamelCase = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
_lowerCamelCase = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def snake_case__ ( self ):
_lowerCamelCase = None
_lowerCamelCase = 1_0
_lowerCamelCase = 2
# create ramp distribution
_lowerCamelCase = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
_lowerCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
_lowerCamelCase = FlaxTopKLogitsWarper(3 )
_lowerCamelCase = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_lowerCamelCase = 5
_lowerCamelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_lowerCamelCase = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, length) ).copy()
_lowerCamelCase = top_k_warp_safety_check(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def snake_case__ ( self ):
_lowerCamelCase = None
_lowerCamelCase = 1_0
_lowerCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_lowerCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
_lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
_lowerCamelCase = np.exp(top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_lowerCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
_lowerCamelCase = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_lowerCamelCase = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
_lowerCamelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_lowerCamelCase = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def snake_case__ ( self ):
_lowerCamelCase = 2_0
_lowerCamelCase = 4
_lowerCamelCase = 0
_lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
_lowerCamelCase = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
_lowerCamelCase = 5
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = 1_5
_lowerCamelCase = min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def snake_case__ ( self ):
_lowerCamelCase = 2_0
_lowerCamelCase = 4
_lowerCamelCase = 0
_lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
_lowerCamelCase = ids_tensor((batch_size, 1) , vocab_size=2_0 )
_lowerCamelCase = 1
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_lowerCamelCase = 3
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def snake_case__ ( self ):
_lowerCamelCase = 2_0
_lowerCamelCase = 4
_lowerCamelCase = 0
_lowerCamelCase = 5
_lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
_lowerCamelCase = ids_tensor((batch_size, 4) , vocab_size=2_0 )
_lowerCamelCase = 4
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_lowerCamelCase = 3
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def snake_case__ ( self ):
_lowerCamelCase = 4
_lowerCamelCase = 1_0
_lowerCamelCase = 1_5
_lowerCamelCase = 2
_lowerCamelCase = 1
_lowerCamelCase = 1_5
# dummy input_ids and scores
_lowerCamelCase = ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
_lowerCamelCase = input_ids.copy()
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = scores.copy()
# instantiate all dist processors
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase = FlaxTopKLogitsWarper(3 )
_lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_lowerCAmelCase )
_lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
_lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
_lowerCamelCase = 1_0
# no processor list
_lowerCamelCase = temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# with processor list
_lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCamelCase = processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def snake_case__ ( self ):
_lowerCamelCase = 4
_lowerCamelCase = 1_0
_lowerCamelCase = 1_5
_lowerCamelCase = 2
_lowerCamelCase = 1
_lowerCamelCase = 1_5
# dummy input_ids and scores
_lowerCamelCase = ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
_lowerCamelCase = input_ids.copy()
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = scores.copy()
# instantiate all dist processors
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase = FlaxTopKLogitsWarper(3 )
_lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_lowerCAmelCase )
_lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
_lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
_lowerCamelCase = 1_0
# no processor list
def run_no_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCamelCase = processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
_lowerCamelCase = jax.jit(_lowerCAmelCase )
_lowerCamelCase = jax.jit(_lowerCAmelCase )
_lowerCamelCase = jitted_run_no_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = jitted_run_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
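# What FlaxTopKLogitsWarper does, re-derived in plain NumPy: keep the k largest
# logits per row and push the rest to -inf (a sketch; the function and variable
# names are illustrative).
import numpy as np

def top_k_filter(scores, k, filter_value=-np.inf):
    kth_largest = np.sort(scores, axis=-1)[:, -k][:, None]  # per-row threshold
    return np.where(scores < kth_largest, filter_value, scores)

logits = np.array([[0.1, 0.9, 0.3, 0.5]])
print(top_k_filter(logits, k=2))  # only 0.9 and 0.5 survive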
| 661
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
    MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
    MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 31
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F'''funnel-transformer/{name}''': 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F'''funnel-transformer/{name}''': {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ) -> str:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Any:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[Any]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Dict:
__a = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
        return tuple(__a )
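# Funnel's one quirk over BERT-style tokenizers is cls_token_type_id = 2; a toy
# recomputation of the token-type layout for a sequence pair (ids are made up).
cls_token_type_id = 2
token_ids_a = [11, 12, 13]
token_ids_b = [21, 22]
sep = [102]  # hypothetical separator id

type_ids = [cls_token_type_id] + [0] * len(token_ids_a + sep) + [1] * len(token_ids_b + sep)
assert type_ids == [2, 0, 0, 0, 0, 1, 1, 1]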
| 705
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Dict = logging.get_logger()
@dataclass
class Tracker :
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook( self , m , inputs , outputs ) -> str:
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self , UpperCAmelCase ) -> Optional[int]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCAmelCase )
[x.remove() for x in self.handles]
return self
@property
    def parametrized( self ) -> Dict:
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer :
    src : nn.Module
    dest : nn.Module
    verbose : int = 0
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
    def __call__( self , x ) -> List[str]:
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                f''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'''Transferred from={src_m} to={dest_m}''' )
def convert_weight_and_push( name , config , save_directory , push_to_hub = True ):
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
        assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = f'''resnet{"-".join(name.split("resnet" ) )}'''
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=True , )
print(f'''Pushed {checkpoint_name}''' )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=idalabel , label2id=labelaid )
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
lowerCamelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
lowerCamelCase_ : Any = parser.parse_args()
lowerCamelCase_ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
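# The Tracker above records leaf modules through forward hooks; a minimal
# standalone version of that idea in plain PyTorch (the helper name is
# illustrative, the hook API is real).
import torch
import torch.nn as nn

def trace_leaf_modules(model, x):
    traced, handles = [], []

    def hook(module, inputs, output):
        if len(list(module.children())) == 0:  # record leaf modules only
            traced.append(module)

    for m in model.modules():
        handles.append(m.register_forward_hook(hook))
    model(x)                 # one forward pass populates `traced` in call order
    for h in handles:
        h.remove()           # always detach hooks afterwards
    return traced

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten())
print(trace_leaf_modules(net, torch.randn(1, 3, 8, 8)))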
| 246
| 0
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""nielsr/canine-s""": 20_48,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_11_41_12
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0XE000
SEP = 0XE001
BOS = 0XE002
MASK = 0XE003
RESERVED = 0XE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer( PreTrainedTokenizer ):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : Any , bos_token : str=chr(CLS ) , eos_token : str=chr(SEP ) , sep_token : str=chr(SEP ) , cls_token : str=chr(CLS ) , pad_token : str=chr(PAD ) , mask_token : str=chr(MASK ) , add_prefix_space : bool=False , model_max_length : int=2048 , **lowerCAmelCase : Tuple , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **lowerCAmelCase , )
# Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
def _lowerCAmelCase ( self : str ):
return self._unicode_vocab_size
def _lowerCAmelCase ( self : int , lowerCAmelCase : str ):
return list(lowerCAmelCase )
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase : str ):
try:
return ord(lowerCAmelCase )
except TypeError:
            raise ValueError(f'''invalid token: \'{lowerCAmelCase}\'''' )
def _lowerCAmelCase ( self : Dict , lowerCAmelCase : int ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(lowerCAmelCase )
except TypeError:
            raise ValueError(f'''invalid id: {lowerCAmelCase}''' )
def _lowerCAmelCase ( self : str , lowerCAmelCase : List[Any] ):
return "".join(lowerCAmelCase )
def _lowerCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def _lowerCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
        result = [1] + ([0] * len(lowerCAmelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(lowerCAmelCase )) + [1]
return result
def _lowerCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def _lowerCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ):
return ()
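# CANINE's vocabulary is just Unicode: token ids are codepoints, so the
# encode/decode round trip reduces to ord()/chr(), as sketched here.
text = "héllo"
ids = [ord(ch) for ch in text]           # tokenize + convert tokens to ids
decoded = "".join(chr(i) for i in ids)   # convert ids back and join
assert decoded == text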
| 583
|
import pprint
import requests
API_ENDPOINT_URL = """https://zenquotes.io/api"""
def quote_of_the_day( ):
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes( ):
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
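# A slightly more defensive variant of the calls above; `raise_for_status` and
# the `timeout` argument are standard `requests` features, and the helper name
# is illustrative.
import requests

def get_quotes(endpoint: str) -> list:
    response = requests.get(f"https://zenquotes.io/api/{endpoint}", timeout=10)
    response.raise_for_status()  # surface HTTP errors instead of decoding bad JSON
    return response.json()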
| 583
| 1
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_lowerCAmelCase = get_tests_dir("fixtures")
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
_UpperCamelCase = mock.Mock()
_UpperCamelCase = 500
_UpperCamelCase = {}
_UpperCamelCase = HTTPError
_UpperCamelCase = {}
# Download this model to make sure it's in the cache.
_UpperCamelCase = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_A ) as mock_head:
_UpperCamelCase = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self : Tuple ):
# This test is for deprecated behavior and can be removed in v5
_UpperCamelCase = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def UpperCamelCase_ ( self : str ):
with self.assertRaises(_A ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
_UpperCamelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_A )
@is_staging_test
class lowerCAmelCase_ ( unittest.TestCase ):
@classmethod
def UpperCamelCase_ ( cls : Optional[int] ):
_UpperCamelCase = TOKEN
HfFolder.save_token(_A )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] ):
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = ViTImageProcessor.from_pretrained(_A )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
_UpperCamelCase = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_A , getattr(_A , _A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_A , repo_id='''test-image-processor''' , push_to_hub=_A , use_auth_token=self._token )
_UpperCamelCase = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_A , getattr(_A , _A ) )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = ViTImageProcessor.from_pretrained(_A )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
_UpperCamelCase = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_A , getattr(_A , _A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_A , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_A , use_auth_token=self._token )
_UpperCamelCase = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_A , getattr(_A , _A ) )
def UpperCamelCase_ ( self : List[Any] ):
CustomImageProcessor.register_for_auto_class()
_UpperCamelCase = CustomImageProcessor.from_pretrained(_A )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
_UpperCamelCase = AutoImageProcessor.from_pretrained(
F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=_A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
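# The server-down test above in miniature: patch the HTTP layer to return a 500
# and assert the patched call was hit. Only standard-library mock plus
# `requests` are used; the URL is illustrative.
import unittest.mock as mock

import requests

fake_response = mock.Mock(status_code=500)
with mock.patch("requests.Session.request", return_value=fake_response) as mocked:
    requests.Session().request("GET", "https://example.com/config.json")
    mocked.assert_called()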
| 719
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "gpt_neox"
def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ):
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = rotary_pct
_UpperCamelCase = rotary_emb_base
_UpperCamelCase = attention_dropout
_UpperCamelCase = hidden_dropout
_UpperCamelCase = classifier_dropout
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = use_cache
_UpperCamelCase = tie_word_embeddings
_UpperCamelCase = use_parallel_residual
_UpperCamelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def UpperCamelCase_ ( self : str ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
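# The rope_scaling validation above accepts dicts such as
# {"type": "linear", "factor": 2.0}; a toy re-implementation of the same checks
# outside the config class (the function name is illustrative).
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must have exactly `type` and `factor`, got {rope_scaling}")
    if rope_scaling.get("type") not in ("linear", "dynamic"):
        raise ValueError("`type` must be 'linear' or 'dynamic'")
    factor = rope_scaling.get("factor")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError("`factor` must be a float > 1")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently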
| 71
| 0
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _a (repo_id : str , path : str , revision : Optional[str] = None ) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
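# Usage sketch for the helper above (named `_a` here); the repo and file names
# are illustrative. With huggingface_hub >= 0.11.0, hf_hub_url url-encodes the
# file path itself, so the quote() branch is skipped.
url = _a("user/my_dataset", "data/train.jsonl", "main")
print(url)  # a https://huggingface.co/datasets/... "resolve" URL for the file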
| 56
|
'''simple docstring'''
from __future__ import annotations
def _a (number_of_bytes : int , partitions : int ) -> list[str]:
    """simple docstring"""
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!' )
    if partitions > number_of_bytes:
        raise ValueError('partitions can not be greater than number_of_bytes!' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
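# A worked example for the partition helper above (named `_a` here): 100 bytes
# over 4 partitions yields 25-byte ranges, and the final partition absorbs any
# remainder.
print(_a(100, 4))  # ['1-25', '26-50', '51-75', '76-100']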
| 56
| 1
|
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
a = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap( checkpoint_path , enable_fusion=False ) -> Optional[Any]:
    """simple docstring"""
    model, model_cfg = create_model(
        'HTSAT-tiny' , 'roberta' , checkpoint_path , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=enable_fusion , fusion_type='aff_2d' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict( state_dict ) -> Any:
"""simple docstring"""
snake_case: Any ={}
snake_case: Any =R'.*sequential.(\d+).*'
snake_case: List[Any] =R'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case: Optional[int] =key.replace(__UpperCAmelCase , __UpperCAmelCase )
if re.match(__UpperCAmelCase , __UpperCAmelCase ):
# replace sequential layers with list
snake_case: Dict =re.match(__UpperCAmelCase , __UpperCAmelCase ).group(1 )
snake_case: Optional[int] =key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(__UpperCAmelCase )//3}.linear.''' )
elif re.match(__UpperCAmelCase , __UpperCAmelCase ):
snake_case: List[str] =int(re.match(__UpperCAmelCase , __UpperCAmelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
snake_case: List[Any] =1 if projecton_layer == 0 else 2
snake_case: Any =key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''' )
        if "audio" in key and "qkv" in key:
# split qkv into query key and value
snake_case: List[Any] =value
snake_case: Any =mixed_qkv.size(0 ) // 3
snake_case: List[Any] =mixed_qkv[:qkv_dim]
snake_case: Any =mixed_qkv[qkv_dim : qkv_dim * 2]
snake_case: Dict =mixed_qkv[qkv_dim * 2 :]
snake_case: Union[str, Any] =query_layer
snake_case: Optional[Any] =key_layer
snake_case: Union[str, Any] =value_layer
else:
snake_case: List[str] =value
return model_state_dict
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ) -> List[str]:
    """simple docstring"""
    clap_model, model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
clap_model.eval()
snake_case: Dict =clap_model.state_dict()
snake_case: Any =rename_state_dict(__UpperCAmelCase )
snake_case: Tuple =ClapConfig()
snake_case: List[Any] =enable_fusion
snake_case: List[Any] =ClapModel(__UpperCAmelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
transformers_config.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the original CLAP checkpoint.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert.')
    parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not.')
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
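# Example invocation (script name and paths are placeholders):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path /path/to/clap.pt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --enable_fusion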
| 700
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
    'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
    'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
    'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
    'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
    'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
    'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
    'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
    'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
    'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig(PretrainedConfig):
    model_type = 'xlm'
    attribute_map = {
        'hidden_size': 'emb_dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
        'n_words': 'vocab_size',  # For backward compatibility
    }
    def __init__(
        self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16,
        dropout=0.1, attention_dropout=0.1, gelu_activation=True,
        sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1,
        use_lang_emb=True, max_position_embeddings=512,
        embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02,
        bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5,
        is_encoder=True, summary_type='first', summary_use_proj=True,
        summary_activation=None, summary_proj_to_labels=True,
        summary_first_dropout=0.1, start_n_top=5, end_n_top=5,
        mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if 'n_words' in kwargs:
            self.n_words = kwargs['n_words']

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
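# Minimal usage sketch (not from the original file): attribute_map lets
# generic code read config.hidden_size even though XLM stores emb_dim.
#
#   config = XLMConfig(vocab_size=30145, emb_dim=1024)
#   assert config.hidden_size == config.emb_dim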
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
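# The dynamic axes returned by the `inputs` property above tell the ONNX
# exporter which input dimensions may vary at runtime: batch size and sequence
# length, plus an extra choice dimension for multiple-choice tasks.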
| 347
| 0
|