"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=4 , snake_case="gelu" , snake_case=0.0 , snake_case=0.1 , snake_case=True , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ) -> int:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_multiple_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = weight_tying
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCamelCase_ ( self ) -> Dict:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = True
return config, input_ids, input_mask, token_labels
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Any:
_UpperCAmelCase = GPTNeoXJapaneseModel(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , attention_mask=snake_case )
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> List[Any]:
_UpperCAmelCase = True
_UpperCAmelCase = GPTNeoXJapaneseModel(snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> Any:
_UpperCAmelCase = GPTNeoXJapaneseForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Tuple:
_UpperCAmelCase = True
_UpperCAmelCase = GPTNeoXJapaneseForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
_UpperCAmelCase = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
_UpperCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_UpperCAmelCase = model(snake_case , attention_mask=snake_case , output_hidden_states=snake_case )
_UpperCAmelCase = output_from_no_past['hidden_states'][0]
_UpperCAmelCase = model(
snake_case , attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_UpperCAmelCase = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = GPTNeoXJapaneseModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Tuple:
# This regression test was failing with PyTorch < 1.3
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
_UpperCAmelCase = None
self.model_tester.create_and_check_model_as_decoder(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*snake_case )
@slow
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = 'abeja/gpt-neox-japanese-2.7b'
_UpperCAmelCase = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
_UpperCAmelCase = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
_UpperCAmelCase = GPTNeoXJapaneseTokenizer.from_pretrained(snake_case )
_UpperCAmelCase = GPTNeoXJapaneseForCausalLM.from_pretrained(snake_case )
_UpperCAmelCase = []
for prompt in prompts:
_UpperCAmelCase = tokenizer(snake_case , return_tensors='pt' ).input_ids
_UpperCAmelCase = model.generate(snake_case , max_length=50 )
_UpperCAmelCase = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
predicted_outputs += generated_string
self.assertListEqual(snake_case , snake_case )
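
# Illustrative sketch (not part of the original test file): the cache-consistency
# check above follows the standard incremental-decoding pattern for any causal LM:
#
#     full = model(torch.cat([input_ids, next_ids], dim=-1), output_hidden_states=True)
#     cached = model(input_ids, use_cache=True)
#     step = model(next_ids, past_key_values=cached.past_key_values, output_hidden_states=True)
#     # hidden states for the new positions agree with the full pass to ~1e-3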
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
lowercase = logging.getLogger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''masked_bert'''
def __init__( self , snake_case=30522 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-12 , snake_case=0 , snake_case="topK" , snake_case="constant" , snake_case=0.0 , **snake_case , ) -> str:
super().__init__(pad_token_id=snake_case , **snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = pruning_method
_UpperCAmelCase = mask_init
_UpperCAmelCase = mask_scale
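
# Minimal usage sketch (names as defined above): the extra parameters select the
# pruning strategy applied to the masked weights.
#     config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#     config.save_pretrained("./masked_bert")  # standard PretrainedConfig serialization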
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__:Dict = """▁"""
SCREAMING_SNAKE_CASE__:Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : str = BigBirdTokenizer
_snake_case : Optional[Any] = BigBirdTokenizerFast
_snake_case : List[str] = True
_snake_case : Tuple = True
def a__ ( self ):
super().setUp()
__a = self.tokenizer_class(lowerCamelCase , keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self ):
__a = "<s>"
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def a__ ( self ):
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(lowerCamelCase ) , 1004 )
def a__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def a__ ( self ):
if not self.test_rust_tokenizer:
return
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = "I was born in 92000, and this is falsé."
__a = tokenizer.tokenize(lowerCamelCase )
__a = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__a = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(lowerCamelCase )
__a = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = BigBirdTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
__a = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [285, 46, 10, 170, 382] , )
__a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__a = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def a__ ( self ):
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def a__ ( self ):
__a = "Hello World!"
__a = [65, 18536, 2260, 101, 66]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def a__ ( self ):
__a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
__a = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@require_torch
@slow
def a__ ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__a = list(self.big_tokenizer.get_vocab().keys() )[:10]
__a = " ".join(lowerCamelCase )
__a = self.big_tokenizer.encode_plus(lowerCamelCase , return_tensors="pt" , return_token_type_ids=lowerCamelCase )
__a = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowerCamelCase )
__a = BigBirdConfig(attention_type="original_full" )
__a = BigBirdModel(lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase )
model(**lowerCamelCase )
@slow
def a__ ( self ):
__a = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
__a = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def a__ ( self ):
__a = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
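
# Note: SPIECE_UNDERLINE ("▁", U+2581) is SentencePiece's word-boundary marker;
# the expected token lists above use it to mark tokens that begin a new word.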
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
__a = np.array([np.sin(a ), np.cos(a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
__a = np.cross(a , a )
origins.append(a )
xs.append(a )
ys.append(a )
zs.append(a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
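
# Usage sketch (names as defined above): twenty cameras panning around the origin
# at 64x64 resolution; `camera_rays` stacks (origin, direction) per pixel.
#     cameras = create_pan_cameras(64)
#     rays = cameras.camera_rays  # shape [1, 20 * 64 * 64, 2, 3]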
def optimal_merge_pattern(files: list) -> float:
    """Merge all files into one with minimum total cost (greedy: always merge
    the two smallest files first).

    Args:
        files: sizes of the files to be merged

    Returns:
        The optimal (minimum) total cost to merge all the files

    >>> optimal_merge_pattern([2, 3, 4])
    14
    >>> optimal_merge_pattern([5, 10, 20, 30, 30])
    205
    >>> optimal_merge_pattern([8, 8, 8, 8, 8])
    96
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
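
# Worked example for [2, 3, 4]: merge 2 + 3 -> 5 (cost 5, files become [4, 5]),
# then merge 4 + 5 -> 9 (cost 9), so the optimal total cost is 5 + 9 = 14.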
class Node:
    """A binary search tree node holding an integer value."""

    def __init__(self, val: int):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val: int):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal appending values to ``res``."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort a list by inserting its elements into a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
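
# Expected output of the example above: [1, 2, 3, 9, 10, 13, 14]. Average time is
# O(n log n) for random input, degrading to O(n^2) when the tree becomes a chain.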
"""WavLM model configuration."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
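
# Worked example (not part of the original file): with the default strides the
# property above evaluates to 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320, i.e. the model
# emits one logit frame per 320 raw waveform samples.
#     WavLMConfig().inputs_to_logits_ratio  # -> 320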
"""Tests for the Longformer tokenizers (slow and fast)."""
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
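
# Note on the offsets above: with a leading space in the text, `trim_offsets=True`
# excludes the space from the first token's span ((1, 1 + len)), while
# `trim_offsets=False` keeps it ((0, 1 + len)). "\u0120" ("Ġ") is the byte-level
# BPE marker for a leading space, which is why space handling matters here.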
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : str = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : List[str] = "wav2vec2"
def __init__( self : Any , snake_case__ : Any=32 , snake_case__ : Optional[int]=768 , snake_case__ : Any=12 , snake_case__ : Dict=12 , snake_case__ : Any=3072 , snake_case__ : Any="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : List[str]=0.0 , snake_case__ : Any=0.0 , snake_case__ : Any=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : List[str]=0.02 , snake_case__ : Union[str, Any]=1E-5 , snake_case__ : Dict="group" , snake_case__ : int="gelu" , snake_case__ : Optional[int]=(512, 512, 512, 512, 512, 512, 512) , snake_case__ : int=(5, 2, 2, 2, 2, 2, 2) , snake_case__ : List[Any]=(10, 3, 3, 3, 3, 2, 2) , snake_case__ : Tuple=False , snake_case__ : Dict=128 , snake_case__ : List[str]=16 , snake_case__ : int=False , snake_case__ : Dict=True , snake_case__ : List[Any]=0.05 , snake_case__ : List[str]=10 , snake_case__ : int=2 , snake_case__ : Tuple=0.0 , snake_case__ : Optional[int]=10 , snake_case__ : Tuple=0 , snake_case__ : Any=320 , snake_case__ : Union[str, Any]=2 , snake_case__ : str=0.1 , snake_case__ : List[Any]=100 , snake_case__ : List[Any]=256 , snake_case__ : int=256 , snake_case__ : List[str]=0.1 , snake_case__ : int="sum" , snake_case__ : Dict=False , snake_case__ : Any=False , snake_case__ : Optional[Any]=256 , snake_case__ : Optional[Any]=(512, 512, 512, 512, 1500) , snake_case__ : str=(5, 3, 3, 1, 1) , snake_case__ : int=(1, 2, 3, 1, 1) , snake_case__ : Any=512 , snake_case__ : int=0 , snake_case__ : str=1 , snake_case__ : Optional[int]=2 , snake_case__ : Optional[int]=False , snake_case__ : Dict=3 , snake_case__ : Dict=2 , snake_case__ : Optional[int]=3 , snake_case__ : Tuple=None , snake_case__ : Optional[int]=None , **snake_case__ : Union[str, Any] , ):
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = feat_extract_norm
lowerCAmelCase__ = feat_extract_activation
lowerCAmelCase__ = list(snake_case__ )
lowerCAmelCase__ = list(snake_case__ )
lowerCAmelCase__ = list(snake_case__ )
lowerCAmelCase__ = conv_bias
lowerCAmelCase__ = num_conv_pos_embeddings
lowerCAmelCase__ = num_conv_pos_embedding_groups
lowerCAmelCase__ = len(self.conv_dim )
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = feat_proj_dropout
lowerCAmelCase__ = final_dropout
lowerCAmelCase__ = layerdrop
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = do_stable_layer_norm
lowerCAmelCase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ = apply_spec_augment
lowerCAmelCase__ = mask_time_prob
lowerCAmelCase__ = mask_time_length
lowerCAmelCase__ = mask_time_min_masks
lowerCAmelCase__ = mask_feature_prob
lowerCAmelCase__ = mask_feature_length
lowerCAmelCase__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ = num_codevectors_per_group
lowerCAmelCase__ = num_codevector_groups
lowerCAmelCase__ = contrastive_logits_temperature
lowerCAmelCase__ = feat_quantizer_dropout
lowerCAmelCase__ = num_negatives
lowerCAmelCase__ = codevector_dim
lowerCAmelCase__ = proj_codevector_dim
lowerCAmelCase__ = diversity_loss_weight
# ctc loss
lowerCAmelCase__ = ctc_loss_reduction
lowerCAmelCase__ = ctc_zero_infinity
# adapter
lowerCAmelCase__ = add_adapter
lowerCAmelCase__ = adapter_kernel_size
lowerCAmelCase__ = adapter_stride
lowerCAmelCase__ = num_adapter_layers
lowerCAmelCase__ = output_hidden_size or hidden_size
lowerCAmelCase__ = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase__ = list(snake_case__ )
lowerCAmelCase__ = list(snake_case__ )
lowerCAmelCase__ = list(snake_case__ )
lowerCAmelCase__ = xvector_output_dim
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
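
# Sketch (an illustration, not part of this file): each feature-extractor conv
# layer maps a sequence length n to (n - kernel) // stride + 1 with no padding,
# so folding that over conv_kernel/conv_stride gives frames per raw waveform.
# `conv_out_lengths` below is a hypothetical helper name:
#     def conv_out_lengths(n, kernels=(10, 3, 3, 3, 3, 2, 2), strides=(5, 2, 2, 2, 2, 2, 2)):
#         for k, s in zip(kernels, strides):
#             n = (n - k) // s + 1
#         return n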
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__lowerCAmelCase : Optional[int] = None
try:
import msvcrt
except ImportError:
__lowerCAmelCase : List[Any] = None
try:
import fcntl
except ImportError:
__lowerCAmelCase : Optional[int] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__lowerCAmelCase : int = OSError
# Data
# ------------------------------------------------
__lowerCAmelCase : Dict = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
__lowerCAmelCase : str = "3.0.12"
__lowerCAmelCase : Optional[int] = None
def _UpperCAmelCase ( ):
"""simple docstring"""
global _logger
lowerCAmelCase__ = _logger or logging.getLogger(__name__ )
return _logger
class a_ ( __UpperCamelCase ):
def __init__( self : str , snake_case__ : Optional[int] ):
lowerCAmelCase__ = lock_file
return None
def __str__( self : Any ):
lowerCAmelCase__ = F"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class a_ :
def __init__( self : Tuple , snake_case__ : Optional[int] ):
lowerCAmelCase__ = lock
return None
def __enter__( self : Union[str, Any] ):
return self.lock
def __exit__( self : List[Any] , snake_case__ : Dict , snake_case__ : str , snake_case__ : List[Any] ):
self.lock.release()
return None
class a_ :
def __init__( self : Dict , snake_case__ : Tuple , snake_case__ : Dict=-1 , snake_case__ : Dict=None ):
lowerCAmelCase__ = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lowerCAmelCase__ = self.hash_filename_if_too_long(snake_case__ , snake_case__ )
# The path to the lock file.
lowerCAmelCase__ = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowerCAmelCase__ = None
# The default timeout value.
lowerCAmelCase__ = timeout
# We use this lock primarily for the lock counter.
lowerCAmelCase__ = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowerCAmelCase__ = 0
return None
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
return self._lock_file
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return self._timeout
@timeout.setter
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : str ):
lowerCAmelCase__ = float(snake_case__ )
return None
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
raise NotImplementedError()
def _SCREAMING_SNAKE_CASE ( self : str ):
raise NotImplementedError()
@property
def _SCREAMING_SNAKE_CASE ( self : str ):
return self._lock_file_fd is not None
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[int]=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
lowerCAmelCase__ = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowerCAmelCase__ = id(self )
lowerCAmelCase__ = self._lock_file
lowerCAmelCase__ = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(snake_case__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowerCAmelCase__ = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Tuple=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowerCAmelCase__ = id(self )
lowerCAmelCase__ = self._lock_file
logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
lowerCAmelCase__ = 0
logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self : Optional[Any] ):
self.acquire()
return self
def __exit__( self : Any , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ):
self.release()
return None
def __del__( self : Optional[int] ):
self.release(force=snake_case__ )
return None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : str , snake_case__ : int ):
lowerCAmelCase__ = os.path.basename(snake_case__ )
if len(snake_case__ ) > max_length and max_length > 0:
lowerCAmelCase__ = os.path.dirname(snake_case__ )
lowerCAmelCase__ = str(hash(snake_case__ ) )
lowerCAmelCase__ = filename[: max_length - len(snake_case__ ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(snake_case__ , snake_case__ )
else:
return path
class a_ ( __UpperCamelCase ):
def __init__( self : List[Any] , snake_case__ : int , snake_case__ : str=-1 , snake_case__ : Tuple=None ):
from .file_utils import relative_to_absolute_path
super().__init__(snake_case__ , timeout=snake_case__ , max_filename_length=snake_case__ )
lowerCAmelCase__ = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
lowerCAmelCase__ = os.open(self._lock_file , snake_case__ )
except OSError:
pass
else:
try:
msvcrt.locking(snake_case__ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(snake_case__ )
else:
lowerCAmelCase__ = fd
return None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self._lock_file_fd
lowerCAmelCase__ = None
msvcrt.locking(snake_case__ , msvcrt.LK_UNLCK , 1 )
os.close(snake_case__ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class a_ ( __UpperCamelCase ):
def __init__( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=-1 , snake_case__ : List[str]=None ):
lowerCAmelCase__ = os.statvfs(os.path.dirname(snake_case__ ) ).f_namemax
super().__init__(snake_case__ , timeout=snake_case__ , max_filename_length=snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = os.O_RDWR | os.O_CREAT | os.O_TRUNC
lowerCAmelCase__ = os.open(self._lock_file , snake_case__ )
try:
fcntl.flock(snake_case__ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(snake_case__ )
else:
lowerCAmelCase__ = fd
return None
def _SCREAMING_SNAKE_CASE ( self : str ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
lowerCAmelCase__ = self._lock_file_fd
lowerCAmelCase__ = None
fcntl.flock(snake_case__ , fcntl.LOCK_UN )
os.close(snake_case__ )
return None
class a_ ( __UpperCamelCase ):
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
lowerCAmelCase__ = os.open(self._lock_file , snake_case__ )
except OSError:
pass
else:
lowerCAmelCase__ = fd
return None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
os.close(self._lock_file_fd )
lowerCAmelCase__ = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
__lowerCAmelCase : Optional[int] = None
if msvcrt:
__lowerCAmelCase : Tuple = WindowsFileLock
elif fcntl:
__lowerCAmelCase : Optional[int] = UnixFileLock
else:
__lowerCAmelCase : Tuple = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
import math


def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point >= sqrt(a) by repeatedly squaring from 2.0."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Square root approximated with Newton's method.

    >>> all(abs(square_root_iterative(i) - math.sqrt(i)) <= 0.00000000000001 for i in range(500))
    True
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
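
# Worked example for a = 2: get_initial_point(2) returns 4.0 (2.0 <= 2, so it is
# squared once), then Newton steps give 4 -> 2.25 -> ~1.5694 -> ~1.4219 -> ~1.41423,
# converging to sqrt(2) ~ 1.41421356.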
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence containing only the values in ``colors`` in a single pass.

    >>> dutch_national_flag_sort([2, 1, 0])
    [0, 1, 2]
    >>> dutch_national_flag_sort([0, 1, 1, 0, 1, 2, 1, 2, 0, 0, 0, 1])
    [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit of ``number``
    (equivalently, its bit length; 0 for the input 0).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
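
# Worked example: 25 is 0b11001, so the loop shifts five times before reaching 0
# and returns 5; this matches Python's built-in (25).bit_length().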
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
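# Usage sketch (illustrative; assumes the standard `transformers-cli` entry point,
# which passes its argparse subparsers object into `register_subcommand`):
#
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased
#
# argparse then dispatches to `download_command_factory` through the `func`
# default, and `run()` fetches both the model and its tokenizer.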
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # Derived quantities used by the feature extractor and the model.
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
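# Worked example (added for illustration) using the defaults above:
# hop_length = 8 * 5 * 4 * 2 = 320, so
#   frame_rate     = ceil(24_000 / 320) = 75 frames per second, and
#   num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32 codebooks.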
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        # Never block on the interactive remote-code confirmation prompt in tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer config
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor config
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)

            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train/validation `DataLoader`s for GLUE MRPC, tokenized with "bert-base-cased"."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
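# Note (added for illustration): padding to a multiple of 8 (fp16/bf16) or 16 (fp8)
# in `collate_fn` keeps sequence lengths aligned with GPU tensor-core tile sizes,
# which is why the multiple is chosen from `accelerator.mixed_precision`.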
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
class MaxFenwickTree:
    """A Fenwick (binary indexed) tree supporting point updates and range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers exactly this element, so overwrite it.
                self.tree[index] = value
            else:
                # The node covers a wider range; fold the new value into its maximum
                # (this keeps the tree correct for non-decreasing updates).
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
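    # Hedged demo (added for illustration): point updates followed by
    # range-max queries; `query` treats `right` as exclusive.
    tree = MaxFenwickTree(5)
    tree.update(0, 10)
    tree.update(3, 7)
    print(tree.query(0, 4))  # 10 (max over indices 0..3)
    print(tree.query(1, 4))  # 7 (max over indices 1..3)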
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending any leftover tail."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
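    # Hedged example (added for illustration): unequal lengths, leftover appended.
    print(alternative_string_arrange("ABCD", "XY"))  # AXBYCD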
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
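# Note (added for illustration): with this lazy-module pattern, a statement like
# `from transformers.models.bridgetower import BridgeTowerModel` only imports
# `modeling_bridgetower` (and therefore torch) on first attribute access, so
# `import transformers` stays cheap even when optional backends are missing.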
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
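# Note (added for illustration, my reading of the code above): every vocabulary
# entry is registered as a no-split token via `_create_trie`, so a raw protein
# string such as "MKTAYI" is segmented into per-residue tokens even though
# `_tokenize` itself only splits on whitespace.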
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units by shifting the power of ten."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
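    # Hedged example (added for illustration): 4 meters expressed in kilometers.
    print(length_conversion(4, "meter", "kilometer"))  # 0.004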
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute E_k = (1/2) * m * |v|^2 for a body of the given mass and velocity."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
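    # Hedged example (added for illustration): a 10 kg mass moving at 10 m/s.
    print(kinetic_energy(10, 10))  # 500.0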
import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the docs' custom.js so `version` becomes the stable version in the dropdown."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
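# Usage sketch (illustrative; the script's real filename is not shown here):
#
#   python <this_script>.py --version 4.28.0
#
# pins `const stableVersion` and appends "v4.28.0" to the docs version mapping.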
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Any=7 , __lowerCamelCase : str=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=True , __lowerCamelCase : int=99 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Dict=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Any=4 , __lowerCamelCase : Dict=None , ) -> int:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = embedding_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
def lowercase_ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : Dict ) -> Any:
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def lowercase_ ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = MobileBertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase_ ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = MobileBertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # special case for ForPreTraining model: it needs both MLM labels and a next-sentence label
    # (`get_values` and `MODEL_FOR_PRETRAINING_MAPPING` come from the imports at the top of this test file)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a long tensor of token ids on the test device."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, so it is not a good idea to measure closeness by subtraction.
        # Instead, we divide the expected result by the actual result and check that the ratio stays within bounds:
        # 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
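# Note on the bound check above: because the activations span roughly 1e-1 to 1e8, a
# relative (ratio-based) comparison is the robust choice. A roughly equivalent formulation
# with standard torch tooling would be
#     torch.allclose(expected_slice, output[..., :3, :3], rtol=TOLERANCE, atol=0.0)
# but the explicit two-sided ratio makes the tolerance semantics obvious when it fails.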
| 472
| 0
|
"""Bilateral filter: edge-preserving smoothing, with OpenCV used only for image I/O."""
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
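    # A quick numeric sanity check on synthetic data (no image file needed): bilateral
    # filtering should reduce the variance of a flat-but-noisy patch. The constants below
    # are illustrative values, not part of the original script.
    rng = np.random.default_rng(0)
    noisy = (0.5 + 0.05 * rng.standard_normal((32, 32))).astype("float32")
    smoothed = bilateral_filter(noisy, spatial_variance, intensity_variance, kernel_size)
    print("noise std before/after:", noisy.std(), smoothed[2:-2, 2:-2].std())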
| 660
|
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 151
| 0
|
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    """Configuration class for Mask2Former; the defaults yield a configuration similar to
    facebook/mask2former-swin-small-coco-instance."""

    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2Former config from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
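# Usage sketch (public transformers API, values illustrative): `Mask2FormerConfig()` builds
# the default Swin backbone shown above; passing a backbone config dict that carries a
# "model_type" key swaps it out, e.g.
#     Mask2FormerConfig(backbone_config=SwinConfig(depths=[2, 2, 6, 2]).to_dict())
# Unsupported backbones only emit a warning (see `backbones_supported`), they do not raise.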
| 214
|
import argparse
import json
from typing import List

from ltp import LTP
from transformers import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode blocks.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False


def is_chinese(word: str):
    # returns 1 only if every character of the word is a CJK character
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # try the longest candidate word first, then shrink
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
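    # Illustration of the output format (hypothetical tokens): for 你好 tokenized as
    # ['[CLS]', '你', '##好', '[SEP]'], the saved ref line is [2] - the position of the
    # "##" continuation piece - which lets whole-word masking mask 你 and 好 together.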
| 214
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
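# How the lazy pattern behaves at runtime: this module is replaced in `sys.modules` by a
# `_LazyModule`, so `import transformers` stays cheap; the first attribute access
# (e.g. `transformers.models.efficientformer.EfficientFormerModel`) is what actually
# imports the heavy torch/TF submodule behind it, guided by `_import_structure`.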
| 95
|
"""simple docstring"""
def __A ( a_ :Tuple , a_ :Union[str, Any] , a_ :int=False) -> List[str]:
if isinstance(a_ , a_) and isinstance(a_ , a_):
__a : List[str] = len(set_a.intersection(a_))
if alternative_union:
__a : List[str] = len(a_) + len(a_)
else:
__a : int = len(set_a.union(a_))
return intersection / union
if isinstance(a_ , (list, tuple)) and isinstance(a_ , (list, tuple)):
__a : Union[str, Any] = [element for element in set_a if element in set_b]
if alternative_union:
__a : Union[str, Any] = len(a_) + len(a_)
return len(a_) / union
else:
__a : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(a_) / len(a_)
return len(a_) / len(a_)
return None
if __name__ == "__main__":
A = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
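    # With alternative_union=True the denominator is |A| + |B| instead of |A ∪ B|,
    # which lowers the score whenever the sets overlap; a quick check:
    print(jaccard_similarity({"a", "b"}, {"b", "c"}))  # 0.333... (1/3)
    print(jaccard_similarity({"a", "b"}, {"b", "c"}, alternative_union=True))  # 0.25 (1/4)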
| 52
| 0
|
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A = {'input_ids': [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = A
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
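# Note: the "▁" (U+2581) prefix used throughout the expected tokens is SentencePiece's
# word-boundary marker; detokenization is essentially "".join(pieces).replace("▁", " ").strip(),
# which is why ▁This ▁is ▁a ▁t est round-trips to "This is a test".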
| 721
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
| 109
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
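# Note: `generate_dummy_inputs` branches on the preprocessor type so a single ONNX export
# path serves both modalities - a tokenizer produces `input_ids` and a feature extractor
# produces `pixel_values`, and both are renamed to the model's generic "inputs" key,
# matching the axis mapping declared in the `inputs` property above.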
| 146
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
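# These functions exercise the public `datasets` inspection API; outside of pytest the same
# calls work interactively (network access assumed, names mirror the parametrizations above):
#     from datasets import get_dataset_split_names
#     get_dataset_split_names("squad", config_name="plain_text")  # -> ['train', 'validation']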
| 721
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the jaccard similarity between two sets: the size of the intersection
    divided by the size of the union. With ``alternative_union`` the denominator
    is ``len(set_a) + len(set_b)`` instead.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # order-preserving union of the two sequences
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 448
| 0
|
def catalan(number: int) -> int:
    """
    Return the ``number``-th Catalan number, computed iteratively via the
    recurrence C(n) = C(n-1) * (4n - 2) // (n + 1).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
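    # Quick check against the known Catalan sequence (the function is 1-indexed, so
    # catalan(n) returns C(n-1)):
    print([catalan(n) for n in range(1, 7)])  # [1, 1, 2, 5, 14, 42]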
| 463
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return TaConfig.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self, config, input_dict,
    ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
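# The decoder-cache check in UMTaModelTester.create_and_check_decoder_model_past captures
# the general KV-cache invariant: with `past_key_values`, feeding only the newest token
# must reproduce the last position of a full forward pass. In pseudocode (names illustrative):
#
#     full = model(torch.cat([ids, next_tok], dim=-1)).last_hidden_state[:, -1]
#     cache = model(ids, use_cache=True).past_key_values
#     incremental = model(next_tok, past_key_values=cache).last_hidden_state[:, 0]
#     assert torch.allclose(incremental, full, atol=1e-3)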
| 463
| 1
|
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the Next Greatest Element (NGE) for each element; brute force, O(n^2)."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow() but uses enumerate() and slicing; still O(n^2)."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for each element using a monotonic stack; O(n) time and space."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 708
|
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : int = '''AutoTokenizer'''
__UpperCAmelCase : Optional[Any] = ['''tokenizer''']
__UpperCAmelCase : str = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self : Union[str, Any] ,_a : Union[str, Any] ,_a : Dict=None ):
'''simple docstring'''
super().__init__(_a )
_a : List[str] = speaker_embeddings
@classmethod
def __lowercase ( cls : Any ,_a : Optional[int] ,_a : Union[str, Any]="speaker_embeddings_path.json" ,**_a : Union[str, Any] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
_a : Tuple = get_file_from_repo(
_a ,_a ,subfolder=kwargs.pop('subfolder' ,_a ) ,cache_dir=kwargs.pop('cache_dir' ,_a ) ,force_download=kwargs.pop('force_download' ,_a ) ,proxies=kwargs.pop('proxies' ,_a ) ,resume_download=kwargs.pop('resume_download' ,_a ) ,local_files_only=kwargs.pop('local_files_only' ,_a ) ,use_auth_token=kwargs.pop('use_auth_token' ,_a ) ,revision=kwargs.pop('revision' ,_a ) ,)
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(_a ,_a )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_a : List[Any] = None
else:
with open(_a ) as speaker_embeddings_json:
_a : List[str] = json.load(_a )
else:
_a : str = None
_a : Any = AutoTokenizer.from_pretrained(_a ,**_a )
return cls(tokenizer=_a ,speaker_embeddings=_a )
def __lowercase ( self : List[str] ,_a : Tuple ,_a : Any="speaker_embeddings_path.json" ,_a : Optional[int]="speaker_embeddings" ,_a : bool = False ,**_a : Optional[int] ,):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_a ,_a ,'v2' ) ,exist_ok=_a )
_a : Optional[Any] = {}
_a : List[str] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_a : Any = self._load_voice_preset(_a )
_a : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] ,_a ,F"""{prompt_key}_{key}""" ) ,voice_preset[key] ,allow_pickle=_a ,)
_a : Dict = os.path.join(_a ,F"""{prompt_key}_{key}.npy""" )
_a : Any = tmp_dict
with open(os.path.join(_a ,_a ) ,'w' ) as fp:
json.dump(_a ,_a )
super().save_pretrained(_a ,_a ,**_a )
def __lowercase ( self : Tuple ,_a : str = None ,**_a : List[Any] ):
'''simple docstring'''
_a : Optional[Any] = self.speaker_embeddings[voice_preset]
_a : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_a : List[Any] = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,_a ) ,cache_dir=kwargs.pop('cache_dir' ,_a ) ,force_download=kwargs.pop('force_download' ,_a ) ,proxies=kwargs.pop('proxies' ,_a ) ,resume_download=kwargs.pop('resume_download' ,_a ) ,local_files_only=kwargs.pop('local_files_only' ,_a ) ,use_auth_token=kwargs.pop('use_auth_token' ,_a ) ,revision=kwargs.pop('revision' ,_a ) ,)
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.""" )
_a : Tuple = np.load(_a )
return voice_preset_dict
def __lowercase ( self : List[Any] ,_a : Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
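# An illustrative preset that would pass the validation above, assuming the
# dimensionalities declared in `preset_shape` (all values are placeholders):
#   {"semantic_prompt": np.zeros(10),
#    "coarse_prompt": np.zeros((2, 10)),
#    "fine_prompt": np.zeros((2, 10))}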
def __call__( self : Any ,_a : List[str]=None ,_a : Tuple=None ,_a : Tuple="pt" ,_a : Any=256 ,_a : Optional[Any]=False ,_a : List[str]=True ,_a : Optional[Any]=False ,**_a : Dict ,):
'''simple docstring'''
if voice_preset is not None and not isinstance(_a ,_a ):
if (
isinstance(_a ,_a )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_a : Union[str, Any] = self._load_voice_preset(_a )
else:
if isinstance(_a ,_a ) and not voice_preset.endswith('.npz' ):
_a : str = voice_preset + '.npz'
_a : Optional[int] = np.load(_a )
if voice_preset is not None:
self._validate_voice_preset_dict(_a ,**_a )
_a : List[str] = BatchFeature(data=_a ,tensor_type=_a )
_a : List[Any] = self.tokenizer(
_a ,return_tensors=_a ,padding='max_length' ,max_length=_a ,return_attention_mask=_a ,return_token_type_ids=_a ,add_special_tokens=_a ,**_a ,)
if voice_preset is not None:
_a : Dict = voice_preset
return encoded_text
| 319
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = LEDTokenizer
SCREAMING_SNAKE_CASE_ = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> str:
'''simple docstring'''
super().setUp()
lowerCamelCase_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def UpperCamelCase( self ) -> str:
'''simple docstring'''
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase_ = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , max_length=len(SCREAMING_SNAKE_CASE_ ) , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE_ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE_ )
self.assertNotIn('labels' , SCREAMING_SNAKE_CASE_ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(text_target=SCREAMING_SNAKE_CASE_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = ['A long paragraph for summarization.']
lowerCamelCase_ = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = tokenizer(text_target=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = inputs['input_ids']
lowerCamelCase_ = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = ['Summary of the text.', 'Another summary.']
lowerCamelCase_ = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = [[0] * len(SCREAMING_SNAKE_CASE_ ) for x in encoded_output['input_ids']]
lowerCamelCase_ = tokenizer.pad(SCREAMING_SNAKE_CASE_ )
self.assertSequenceEqual(outputs['global_attention_mask'] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'A, <mask> AllenNLP sentence.'
lowerCamelCase_ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 42
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def UpperCAmelCase_ ( _A , _A , _A , _A , _A = None , _A = None , _A = None , ):
'''simple docstring'''
if config_name_or_path is None:
SCREAMING_SNAKE_CASE__ = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
if generator_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE__ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE__ = question_encoder_name_or_path
SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
# Save model.
SCREAMING_SNAKE_CASE__ = RagConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE__ = gen_config
SCREAMING_SNAKE_CASE__ = question_encoder_config
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained_question_encoder_generator(
_A , _A , config=_A )
rag_model.save_pretrained(_A )
# Sanity check.
model_class.from_pretrained(_A )
# Save tokenizers.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(_A )
gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(_A )
question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
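# A minimal usage sketch, assuming the function above is exposed as
# `consolidate` (the name the CLI entry point below calls); every path and
# model identifier here is a placeholder:
#
#   consolidate(
#       "rag_sequence",
#       "facebook/bart-large",                            # generator
#       "facebook/dpr-question_encoder-single-nq-base",   # question encoder
#       Path("./rag-consolidated"),                       # dest_dir
#   )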
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
_SCREAMING_SNAKE_CASE : str = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 493
| 0
|
_a: List[Any] = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(10_0000)]
def __lowerCAmelCase ( A ):
UpperCAmelCase_ = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
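# Illustrative check of the 5-digit chunking above:
# for 123456 the loop adds DIGITS_SQUARED[23456] + DIGITS_SQUARED[1]
# == (2**2 + 3**2 + 4**2 + 5**2 + 6**2) + 1**2 == 90 + 1 == 91,
# the same result as squaring and summing the digits one at a time.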
# There are 2 chains made:
# one ends with 89, and declaring its member 58 first means the fewest
# iterations are needed to check all the remaining members;
# the other ends with 1 and has only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution
_a: list[bool | None] = [None] * 1000_0000
_a: Any = True
_a: Optional[Any] = False
def __lowerCAmelCase ( A ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase_ = chain(next_number(A ) )
UpperCAmelCase_ = number_chain
while number < 10000000:
UpperCAmelCase_ = number_chain
number *= 10
return number_chain
def __lowerCAmelCase ( A = 10000000 ):
for i in range(1 , A ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(A )
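# A small self-contained restatement of the chain logic with a dict memo in
# place of the flat CHAINS array; purely illustrative and not used below.
def _ends_in_89_sketch(start, _memo={1: False, 89: True}):
    # The shared default dict deliberately persists across calls as a memo.
    if start not in _memo:
        _memo[start] = _ends_in_89_sketch(sum(int(d) ** 2 for d in str(start)))
    return _memo[start]
# Example: _ends_in_89_sketch(44) is False (44 -> 32 -> 13 -> 10 -> 1).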
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 268
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a: str = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( lowercase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = ReformerTokenizer
SCREAMING_SNAKE_CASE__ = ReformerTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
def __A ( self : Dict ):
'''simple docstring'''
super().setUp()
UpperCAmelCase_ = ReformerTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = "<s>"
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowerCAmelCase ) , 1_000 )
def __A ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __A ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def __A ( self : List[Any] , lowerCAmelCase : Optional[int]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
# Simple input
UpperCAmelCase_ = "This is a simple input"
UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
UpperCAmelCase_ = ("This is a simple input", "This is a pair")
UpperCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" , )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ = ReformerTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __A ( self : int ):
'''simple docstring'''
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) )
@slow
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
UpperCAmelCase_ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) )
@require_torch
@slow
def __A ( self : Tuple ):
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ = " ".join(lowerCAmelCase )
UpperCAmelCase_ = self.big_tokenizer.encode_plus(lowerCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
UpperCAmelCase_ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
UpperCAmelCase_ = encoded_sequence["input_ids"].shape
UpperCAmelCase_ = ReformerModel(lowerCAmelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCAmelCase )
model(**lowerCAmelCase )
@slow
def __A ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowerCAmelCase , sequences=lowerCAmelCase , )
| 268
| 1
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
__lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def UpperCAmelCase_ (__a : List[Any] ):
"""simple docstring"""
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 1_0:
_a : Union[str, Any] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 1_0:
_a : Optional[Any] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 1_0:
_a : str = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 1_0:
_a : Optional[Any] = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_a : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_a : Union[str, Any] = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_a : List[str] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_a : str = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
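# Two illustrative rewrites performed by replace_key above (keys are made up):
#
#   replace_key("y_emb.weight")       -> "metadata_embedding.weight"
#   replace_key("prior.x_out.weight") -> "prior.fc_proj_out.weight"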
def UpperCAmelCase_ (__a : Optional[Any] , __a : str , __a : Any , __a : Optional[int] ):
"""simple docstring"""
_a : Optional[int] = {}
import re
_a : Optional[int] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_a : Any = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_a : Union[str, Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_a : Optional[Any] = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_a : Optional[int] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_a : str = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_a : int = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_a : str = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_a : List[Any] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_snake_case ):
_a : List[str] = re_encoder_block_conv_in.match(_snake_case )
_a : List[str] = regex_match.groups()
_a : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_a : Any = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_a : List[Any] = re_encoder_block_conv_in.sub(_snake_case , _snake_case )
elif re_encoder_block_resnet.fullmatch(_snake_case ):
_a : Optional[Any] = re_encoder_block_resnet.match(_snake_case )
_a : Optional[Any] = regex_match.groups()
_a : Dict = int(groups[2] ) * 2 + int(groups[3] )
_a : Optional[int] = {'1': 1, '3': 2}[groups[-2]]
_a : Any = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_a : Dict = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_a : int = prefix + resnet_block
_a : Union[str, Any] = re_encoder_block_resnet.sub(_snake_case , _snake_case )
elif re_encoder_block_proj_out.fullmatch(_snake_case ):
_a : str = re_encoder_block_proj_out.match(_snake_case )
_a : str = regex_match.groups()
_a : str = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_a : int = re_encoder_block_proj_out.sub(_snake_case , _snake_case )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_snake_case ):
_a : List[str] = re_decoder_block_conv_out.match(_snake_case )
_a : Any = regex_match.groups()
_a : List[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_a : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_a : Optional[int] = re_decoder_block_conv_out.sub(_snake_case , _snake_case )
elif re_decoder_block_resnet.fullmatch(_snake_case ):
_a : Optional[int] = re_decoder_block_resnet.match(_snake_case )
_a : Optional[Any] = regex_match.groups()
_a : List[str] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_a : Dict = {'1': 1, '3': 2}[groups[-2]]
_a : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_a : Tuple = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_a : List[Any] = prefix + resnet_block
_a : Tuple = re_decoder_block_resnet.sub(_snake_case , _snake_case )
elif re_decoder_block_proj_in.fullmatch(_snake_case ):
_a : Dict = re_decoder_block_proj_in.match(_snake_case )
_a : List[str] = regex_match.groups()
_a : Tuple = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_a : Dict = re_decoder_block_proj_in.sub(_snake_case , _snake_case )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_snake_case ):
_a : Union[str, Any] = re_prior_cond_conv_out.match(_snake_case )
_a : Tuple = regex_match.groups()
_a : Tuple = int(groups[1] ) * 2 + int(groups[2] ) - 2
_a : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_a : Tuple = re_prior_cond_conv_out.sub(_snake_case , _snake_case )
elif re_prior_cond_resnet.fullmatch(_snake_case ):
_a : Dict = re_prior_cond_resnet.match(_snake_case )
_a : List[Any] = regex_match.groups()
_a : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_a : int = {'1': 1, '3': 2}[groups[-2]]
_a : Dict = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_a : List[Any] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_a : Optional[int] = prefix + resnet_block
_a : Any = re_prior_cond_resnet.sub(_snake_case , _snake_case )
elif re_prior_cond_proj_in.fullmatch(_snake_case ):
_a : Dict = re_prior_cond_proj_in.match(_snake_case )
_a : List[Any] = regex_match.groups()
_a : Any = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_a : str = re_prior_cond_proj_in.sub(_snake_case , _snake_case )
# keep original key
else:
_a : Tuple = original_key
_a : List[Any] = replace_key(_snake_case )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}: no matching key in the model state dict""" )
# handle mismatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_a : int = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key} -> {key} : shapes {val.shape} and {value.shape} do not match""" )
_a : Optional[Any] = original_key
_a : str = original_key
_a : List[str] = value
return new_dict
@torch.no_grad()
def UpperCAmelCase_ (__a : Optional[Any]=None , __a : Union[str, Any]=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_a : List[str] = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_snake_case )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_snake_case )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_a : int = MODEL_MAPPING[model_name.split('/' )[-1]]
_a : Tuple = JukeboxConfig.from_pretrained(_snake_case )
_a : Union[str, Any] = JukeboxModel(_snake_case )
_a : Optional[int] = []
_a : str = {}
for i, dict_name in enumerate(_snake_case ):
_a : Optional[Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_a : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_a : List[str] = old_dic[k]
elif k.endswith('.w' ):
_a : Optional[Any] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_a : Tuple = old_dic[k]
else:
_a : Dict = old_dic[k]
_a : List[Any] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_a : Any = fix_jukebox_keys(_snake_case , model.state_dict() , _snake_case , _snake_case )
weight_dict.append(_snake_case )
_a : str = weight_dict.pop(0 )
model.vqvae.load_state_dict(_snake_case )
for i in range(len(_snake_case ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_snake_case , _snake_case )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_snake_case )
return weight_dict
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
__lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 229
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self : Tuple ) -> Optional[int]:
_A = tempfile.mkdtemp()
_A = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_A = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
_A = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : int ) -> Optional[Any]:
_A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self : Dict ) -> List[str]:
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
_A = self.get_image_processor()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_A = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def snake_case_ ( self : List[Any] ) -> List[str]:
_A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_A = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def snake_case_ ( self : str ) -> List[Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = self.prepare_image_inputs()
_A = image_processor(__lowerCAmelCase , return_tensors='''np''' )
_A = processor(images=__lowerCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case_ ( self : Union[str, Any] ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = processor(text=__lowerCAmelCase )
_A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : List[str] ) -> Any:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = self.prepare_image_inputs()
_A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def snake_case_ ( self : Optional[Any] ) -> str:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(__lowerCAmelCase )
_A = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : str ) -> str:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = self.prepare_image_inputs()
_A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 2
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowerCamelCase__ = trt.Logger(trt.Logger.WARNING)
lowerCamelCase__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""The number of processes to use for preprocessing."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
lowerCamelCase__ = parser.parse_args()
if args.tokenizer_name:
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
lowerCamelCase__ = args.per_device_eval_batch_size
lowerCamelCase__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase__ = True
lowerCamelCase__ = """temp_engine/bert-fp32.engine"""
if args.fpaa:
lowerCamelCase__ = """temp_engine/bert-fp16.engine"""
if args.inta:
lowerCamelCase__ = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
lowerCamelCase__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowerCamelCase__ = [network.get_input(i) for i in range(network.num_inputs)]
lowerCamelCase__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowerCamelCase__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
lowerCamelCase__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowerCamelCase__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Tuple = np.asarray(inputs['input_ids'] , dtype=np.intaa )
__lowerCAmelCase : Union[str, Any] = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
__lowerCAmelCase : List[str] = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _UpperCamelCase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _UpperCamelCase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _UpperCamelCase )
# start time
__lowerCAmelCase : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(_UpperCamelCase ) for d_inp in d_inputs] + [int(_UpperCamelCase ), int(_UpperCamelCase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
cuda.memcpy_dtoh_async(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
__lowerCAmelCase : str = time.time()
__lowerCAmelCase : Optional[Any] = end_time - start_time
__lowerCAmelCase : Optional[int] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
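# The function above follows the standard PyCUDA + TensorRT async pattern; as
# a sketch (the buffer handles are the ones allocated further below):
#   1. cuda.memcpy_htod_async(...)  copies the host inputs onto the device
#   2. context.execute_async(...)   enqueues inference on the CUDA stream
#   3. cuda.memcpy_dtoh_async(...)  copies the logits back to pinned host memory
#   4. stream.synchronize()         blocks until the stream drains, bounding the timing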
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowerCamelCase__ = raw_datasets["""validation"""].column_names
lowerCamelCase__ = """question""" if """question""" in column_names else column_names[0]
lowerCamelCase__ = """context""" if """context""" in column_names else column_names[1]
lowerCamelCase__ = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase__ = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
lowerCamelCase__ = min(args.max_seq_length, tokenizer.model_max_length)
def __lowerCAmelCase (_UpperCamelCase ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace.
__lowerCAmelCase : Tuple = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
__lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=_UpperCamelCase , stride=args.doc_stride , return_overflowing_tokens=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__lowerCAmelCase : str = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__lowerCAmelCase : List[Any] = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__lowerCAmelCase : Optional[int] = tokenized_examples.sequence_ids(_UpperCamelCase )
__lowerCAmelCase : List[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__lowerCAmelCase : List[Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__lowerCAmelCase : Optional[int] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
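# Illustrative shape of the overflow mapping consumed above (hypothetical
# counts): if two long contexts split into 3 and 2 features respectively,
# overflow_to_sample_mapping == [0, 0, 0, 1, 1], and each feature i copies
# examples["id"][sample_mapping[i]] into its own "example_id".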
lowerCamelCase__ = raw_datasets["""validation"""]
# Validation Feature Creation
lowerCamelCase__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
lowerCamelCase__ = default_data_collator
lowerCamelCase__ = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
lowerCamelCase__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
__lowerCAmelCase : List[str] = postprocess_qa_predictions(
examples=_UpperCamelCase , features=_UpperCamelCase , predictions=_UpperCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_UpperCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__lowerCAmelCase : Tuple = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
__lowerCAmelCase : int = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
__lowerCAmelCase : Optional[int] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCamelCase , label_ids=_UpperCamelCase )
lowerCamelCase__ = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def __lowerCAmelCase (_UpperCamelCase ):
return trt.volume(engine.get_binding_shape(_UpperCamelCase ) ) * engine.get_binding_dtype(_UpperCamelCase ).itemsize
# Allocate device memory for inputs and outputs.
lowerCamelCase__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    lowerCamelCase__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    lowerCamelCase__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    lowerCamelCase__ = cuda.mem_alloc(h_output0.nbytes)
    lowerCamelCase__ = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
lowerCamelCase__ = cuda.Stream()
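    # Hedged sketch: `model_infer` is called in the loop below but is not defined in this
    # excerpt. A minimal version following the usual PyCUDA async pattern could look like
    # this (it assumes three int32 inputs, namely input_ids, attention_mask and
    # token_type_ids, and an explicit-batch engine with two output bindings).
    def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
        input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
        attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
        token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
        # Copy the three inputs host -> device
        cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
        cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
        cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
        start_time = timeit.default_timer()
        # Run inference, then copy the start/end logits device -> host
        context.execute_async_v2(
            bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)],
            stream_handle=stream.handle,
        )
        cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
        cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
        stream.synchronize()
        return (h_output0, h_output1), timeit.default_timer() - start_time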
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(f' Num examples = {len(eval_dataset)}')
logger.info(f' Batch size = {args.per_device_eval_batch_size}')
lowerCamelCase__ = 0.0
lowerCamelCase__ = 0
lowerCamelCase__ = timeit.default_timer()
lowerCamelCase__ = None
for step, batch in enumerate(eval_dataloader):
        lowerCamelCase__ , lowerCamelCase__ = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
lowerCamelCase__ , lowerCamelCase__ = outputs
lowerCamelCase__ = torch.tensor(start_logits)
lowerCamelCase__ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
lowerCamelCase__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
lowerCamelCase__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
lowerCamelCase__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
lowerCamelCase__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
lowerCamelCase__ = nested_truncate(all_preds, len(eval_dataset))
lowerCamelCase__ = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1_000 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1_000))
logger.info("""Total Number of Inference = %d""", niter)
lowerCamelCase__ = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCamelCase__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'Evaluation metrics: {eval_metric}')
| 549
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def __lowerCAmelCase (_UpperCamelCase ):
# initialize config
if "resnet-50" in model_name:
__lowerCAmelCase : int = ResNetConfig.from_pretrained('microsoft/resnet-50' )
elif "resnet-101" in model_name:
__lowerCAmelCase : Union[str, Any] = ResNetConfig.from_pretrained('microsoft/resnet-101' )
else:
raise ValueError('Model name should include either resnet50 or resnet101' )
__lowerCAmelCase : Union[str, Any] = DetrConfig(use_timm_backbone=_UpperCamelCase , backbone_config=_UpperCamelCase )
# set label attributes
__lowerCAmelCase : Optional[int] = 'panoptic' in model_name
if is_panoptic:
__lowerCAmelCase : Dict = 250
else:
__lowerCAmelCase : List[str] = 91
__lowerCAmelCase : Tuple = 'huggingface/label-files'
__lowerCAmelCase : str = 'coco-detection-id2label.json'
__lowerCAmelCase : Optional[int] = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
    __lowerCAmelCase : List[Any] = {int(k ): v for k, v in idalabel.items()}
__lowerCAmelCase : List[Any] = idalabel
__lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def __lowerCAmelCase (_UpperCamelCase ):
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCAmelCase : Tuple = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : str = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase : Dict = val
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=False ):
__lowerCAmelCase : Tuple = ''
if is_panoptic:
__lowerCAmelCase : Union[str, Any] = 'detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowerCAmelCase : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
__lowerCAmelCase : List[Any] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase : int = in_proj_weight[:256, :]
__lowerCAmelCase : Tuple = in_proj_bias[:256]
__lowerCAmelCase : Dict = in_proj_weight[256:512, :]
__lowerCAmelCase : Optional[int] = in_proj_bias[256:512]
__lowerCAmelCase : int = in_proj_weight[-256:, :]
__lowerCAmelCase : int = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__lowerCAmelCase : int = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
__lowerCAmelCase : List[str] = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase : List[str] = in_proj_weight[:256, :]
__lowerCAmelCase : int = in_proj_bias[:256]
__lowerCAmelCase : Any = in_proj_weight[256:512, :]
__lowerCAmelCase : Any = in_proj_bias[256:512]
__lowerCAmelCase : Any = in_proj_weight[-256:, :]
__lowerCAmelCase : Optional[int] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__lowerCAmelCase : Union[str, Any] = state_dict.pop(
F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
__lowerCAmelCase : Optional[Any] = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__lowerCAmelCase : List[Any] = in_proj_weight_cross_attn[:256, :]
__lowerCAmelCase : Union[str, Any] = in_proj_bias_cross_attn[:256]
__lowerCAmelCase : Optional[Any] = in_proj_weight_cross_attn[256:512, :]
__lowerCAmelCase : Any = in_proj_bias_cross_attn[256:512]
__lowerCAmelCase : str = in_proj_weight_cross_attn[-256:, :]
__lowerCAmelCase : Optional[int] = in_proj_bias_cross_attn[-256:]
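# Hedged illustration (not in the original script) of the fused-projection split above:
# torch.nn.MultiheadAttention keeps a single in_proj_weight of shape (3 * hidden, hidden),
# and with DETR's hidden size of 256 the rows [:256] are the query weights, [256:512] the
# keys and [-256:] the values. Wrapped in a function so nothing runs at import time.
def _split_in_proj_demo():
    fused_weight = torch.randn(3 * 256, 256)
    q_w, k_w, v_w = fused_weight[:256, :], fused_weight[256:512, :], fused_weight[-256:, :]
    assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused_weight)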
def __lowerCAmelCase ():
__lowerCAmelCase : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase : Any = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=False ):
__lowerCAmelCase , __lowerCAmelCase : int = get_detr_config(_UpperCamelCase )
# load original model from torch hub
__lowerCAmelCase : List[str] = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(F"Converting model {model_name}..." )
__lowerCAmelCase : List[Any] = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=_UpperCamelCase ).eval()
__lowerCAmelCase : str = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_UpperCamelCase ):
if is_panoptic:
__lowerCAmelCase : List[str] = 'detr.' + src
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_UpperCamelCase , is_panoptic=_UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowerCAmelCase : Optional[Any] = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
__lowerCAmelCase : Optional[Any] = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase : Tuple = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowerCAmelCase : List[Any] = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
__lowerCAmelCase : Optional[Any] = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
__lowerCAmelCase : Dict = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase : Optional[int] = val
# finally, create HuggingFace model and load state dict
__lowerCAmelCase : Tuple = DetrForSegmentation(_UpperCamelCase ) if is_panoptic else DetrForObjectDetection(_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
model.eval()
# verify our conversion on an image
__lowerCAmelCase : Optional[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection'
__lowerCAmelCase : List[str] = DetrImageProcessor(format=_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = processor(images=prepare_img() , return_tensors='pt' )
__lowerCAmelCase : Optional[int] = encoding['pixel_values']
__lowerCAmelCase : Optional[Any] = detr(_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = model(_UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F"nielsr/{model_name}" )
processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
lowerCamelCase__ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 549
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
a : Any = logging.getLogger(__name__)
torch.set_grad_enabled(False)
a : Tuple = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : Tuple=1_00 , _UpperCAmelCase : List[Any]=" " ) -> List[str]:
__snake_case = text.split(_UpperCAmelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase )]
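# Illustration (hedged): with n=3 and the default separator, the function above turns
# "a b c d e f g" into ["a b c", "d e f", "g"], i.e. passages of at most n words each.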
def __UpperCAmelCase ( _UpperCAmelCase : dict ) -> dict:
__snake_case , __snake_case = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(_UpperCAmelCase ):
titles.append(title if title is not None else "" )
texts.append(_UpperCAmelCase )
return {"title": titles, "text": texts}
def __UpperCAmelCase ( _UpperCAmelCase : dict , _UpperCAmelCase : DPRContextEncoder , _UpperCAmelCase : DPRContextEncoderTokenizerFast ) -> dict:
__snake_case = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_UpperCAmelCase , padding="longest" , return_tensors="pt" )["input_ids"]
__snake_case = ctx_encoder(input_ids.to(device=_UpperCAmelCase ) , return_dict=_UpperCAmelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __UpperCAmelCase ( _UpperCAmelCase : "RagExampleArguments" , _UpperCAmelCase : "ProcessingArguments" , _UpperCAmelCase : "IndexHnswArguments" , ) -> Union[str, Any]:
######################################
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
__snake_case = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
__snake_case = dataset.map(_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
__snake_case = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_UpperCAmelCase )
__snake_case = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
__snake_case = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
__snake_case = dataset.map(
partial(_UpperCAmelCase , ctx_encoder=_UpperCAmelCase , ctx_tokenizer=_UpperCAmelCase ) , batched=_UpperCAmelCase , batch_size=processing_args.batch_size , features=_UpperCAmelCase , )
# And finally save your dataset
__snake_case = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(_UpperCAmelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
__snake_case = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=_UpperCAmelCase )
# And save the index
__snake_case = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(_UpperCAmelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
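    # Hedged sketch of querying the saved artifacts (names are illustrative, mirroring the
    # reload hints above; a real query embedding would come from a DPRQuestionEncoder):
    # dataset.load_faiss_index("embeddings", index_path)
    # scores, docs = dataset.get_nearest_examples("embeddings", question_embedding, k=5)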
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = field(
        default=str(Path(__file__ ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
__SCREAMING_SNAKE_CASE = field(
default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
__SCREAMING_SNAKE_CASE = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} , )
__SCREAMING_SNAKE_CASE = field(
        default=str(Path(__file__ ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} , )
__SCREAMING_SNAKE_CASE = field(
default=16 , metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = field(
default=768 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
__SCREAMING_SNAKE_CASE = field(
default=128 , metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
a : int = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
a , a , a : str = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
a : str = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 69
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
lowerCAmelCase__ : Optional[int] =0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
lowerCAmelCase__ : str =[int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class __lowercase :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = WATERMARK_BITS
SCREAMING_SNAKE_CASE_ : int = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
if images.shape[-1] < 2_5_6:
return images
SCREAMING_SNAKE_CASE_ : Tuple = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.encoder.encode(lowerCAmelCase__ , 'dwtDct' ) for image in images]
SCREAMING_SNAKE_CASE_ : List[str] = torch.from_numpy(np.array(lowerCAmelCase__ ) ).permute(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.clamp(2 * (images / 2_5_5 - 0.5) , min=-1.0 , max=1.0 )
return images
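# Hedged usage sketch (not in the original module): apply the watermark to a dummy batch
# laid out as (batch, channels, height, width) with values in [-1, 1], which is the range
# the method above assumes before its `/ 2 + 0.5` rescale.
if __name__ == "__main__":
    dummy_images = torch.rand(1, 3, 512, 512) * 2 - 1
    watermarked = __lowercase().UpperCamelCase__(dummy_images)
    assert watermarked.shape == dummy_images.shape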
| 101
| 0
|
'''simple docstring'''
import operator as op
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Union[str, Any] = []
    _snake_case : Dict = lambda x , y : int(x / y ) # noqa: E731 integer division operation
_snake_case : List[Any] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
print('''-''' * (30 + len(lowerCAmelCase_ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(lowerCAmelCase_ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(lowerCAmelCase_ ) , sep=''' | ''' )
else:
_snake_case : Optional[int] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(lowerCAmelCase_ ) , sep=''' | ''' )
_snake_case : Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(lowerCAmelCase_ ) , sep=''' | ''' )
stack.append(
str(opr[x](int(lowerCAmelCase_ ) , int(lowerCAmelCase_ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(lowerCAmelCase_ ) , sep=''' | ''' , )
return int(stack[0] )
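# Worked example (hedged): for the postfix input "5 6 9 * +" the loop above pushes 5, 6
# and 9, then "*" pops 9 and 6 and pushes 54, and "+" pops 54 and 5 and returns 59.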
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
| 703
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def _a ( ):
"""simple docstring"""
_snake_case : List[Any] = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
_snake_case : List[str] = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(lowerCAmelCase_ )
DownloadCommand.register_subcommand(lowerCAmelCase_ )
EnvironmentCommand.register_subcommand(lowerCAmelCase_ )
RunCommand.register_subcommand(lowerCAmelCase_ )
ServeCommand.register_subcommand(lowerCAmelCase_ )
UserCommands.register_subcommand(lowerCAmelCase_ )
AddNewModelCommand.register_subcommand(lowerCAmelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ )
LfsCommands.register_subcommand(lowerCAmelCase_ )
PTtoTFCommand.register_subcommand(lowerCAmelCase_ )
# Let's go
_snake_case : str = parser.parse_args()
if not hasattr(lowerCAmelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
_snake_case : Union[str, Any] = args.func(lowerCAmelCase_ )
service.run()
if __name__ == "__main__":
main()
| 47
| 0
|
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE = 1000 ) -> int:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
while a < n:
        # a % 15 == 0 implies a % 3 == 0, so a single branch counts each multiple exactly once
        if a % 3 == 0 or a % 5 == 0:
            result += a
a += 1
return result
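# Quick check (hedged): for n = 10 the multiples of 3 or 5 below 10 are 3, 5, 6 and 9,
# so the function returns 23; with the default n = 1000 it returns 233168.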
if __name__ == "__main__":
print(f'''{solution() = }''')
| 345
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case_ ( IterableDataset ):
def __init__( self , __lowerCAmelCase=0.01 , __lowerCAmelCase=1_000 ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = p_stop
SCREAMING_SNAKE_CASE_ : Dict = max_length
def __iter__( self ):
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Dict = False
while not stop and count < self.max_length:
yield count
count += 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random.random() < self.p_stop
class snake_case_ ( unittest.TestCase ):
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=True ):
SCREAMING_SNAKE_CASE_ : int = [
BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
for i in range(2 )
]
SCREAMING_SNAKE_CASE_ : Tuple = [list(__lowerCAmelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__lowerCAmelCase ) for shard in batch_sampler_shards] , [len(__lowerCAmelCase ) for e in expected] )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def __A ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but its number of
        # batches is a multiple of num_processes.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and its number of
        # batches is not a multiple of num_processes.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
def __A ( self ):
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
def __A ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but its number of
        # batches is a multiple of num_processes.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and its number of
        # batches is not a multiple of num_processes.
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
def __A ( self ):
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , even_batches=__lowerCAmelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=2 , __lowerCAmelCase=False ):
random.seed(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = list(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = [
IterableDatasetShard(
__lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase , num_processes=__lowerCAmelCase , process_index=__lowerCAmelCase , split_batches=__lowerCAmelCase , )
for i in range(__lowerCAmelCase )
]
SCREAMING_SNAKE_CASE_ : List[Any] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__lowerCAmelCase )
iterable_dataset_lists.append(list(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Dict = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
SCREAMING_SNAKE_CASE_ : Dict = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
self.assertTrue(len(__lowerCAmelCase ) % shard_batch_size == 0 )
SCREAMING_SNAKE_CASE_ : Dict = []
for idx in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__lowerCAmelCase ) < len(__lowerCAmelCase ):
reference += reference
self.assertListEqual(__lowerCAmelCase , reference[: len(__lowerCAmelCase )] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : str = 42
SCREAMING_SNAKE_CASE_ : List[str] = RandomIterableDataset()
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
# Edge case with a very small dataset
SCREAMING_SNAKE_CASE_ : List[str] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = SkipBatchSampler(__lowerCAmelCase , 2 )
self.assertListEqual(list(__lowerCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Dict = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
SCREAMING_SNAKE_CASE_ : Tuple = skip_first_batches(__lowerCAmelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __A ( self ):
Accelerator()
SCREAMING_SNAKE_CASE_ : Any = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 345
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class UpperCamelCase__ ( BaseOutput ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 317
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : Optional[int] = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 317
| 1
|
'''simple docstring'''
__magic_name__ : Tuple = """Alexander Joslin"""
import operator as op
from .stack import Stack
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
_snake_case = Stack()
_snake_case = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE__ ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE__ )
elif i == ")":
# RULE 4
_snake_case = operator_stack.peek()
operator_stack.pop()
_snake_case = operand_stack.peek()
operand_stack.pop()
_snake_case = operand_stack.peek()
operand_stack.pop()
_snake_case = operators[opr](SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
operand_stack.push(SCREAMING_SNAKE_CASE__ )
# RULE 5
return operand_stack.peek()
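# Worked trace (hedged) for the sample input below: "(4 * 2)" reduces to 8, "(2 + 3)" to 5,
# their product to 40, and finally "5 + 40" gives the printed result 45.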
if __name__ == "__main__":
__magic_name__ : int = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 672
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowercase__ = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowercase__ = {"facebook/blenderbot-3B": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
snake_case : int = bs[:]
snake_case : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(SCREAMING_SNAKE_CASE__ )
cs.append(2**8 + n )
n += 1
snake_case : Tuple = [chr(SCREAMING_SNAKE_CASE__ ) for n in cs]
return dict(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
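# Illustration (hedged): the mapping above sends printable bytes to themselves and shifts
# the remaining bytes past 0x100 in order, e.g. byte 0x20 (space) becomes "Ġ" (U+0120),
# which is why GPT-2-style BPE vocabularies are full of "Ġ"-prefixed tokens.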
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> Any:
'''simple docstring'''
snake_case : List[str] = set()
snake_case : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case : Dict = char
return pairs
class snake_case__ ( PreTrainedTokenizer ):
"""simple docstring"""
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str="replace" , UpperCamelCase__ : int="<s>" , UpperCamelCase__ : Optional[int]="</s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : Any="<unk>" , UpperCamelCase__ : List[Any]="<pad>" , UpperCamelCase__ : Optional[int]="<mask>" , UpperCamelCase__ : Optional[Any]=False , **UpperCamelCase__ : Dict , ) -> str:
"""simple docstring"""
snake_case : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
snake_case : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
snake_case : List[str] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
snake_case : Optional[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
snake_case : int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
snake_case : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
snake_case : Union[str, Any] = json.load(UpperCamelCase__ )
snake_case : Dict = {v: k for k, v in self.encoder.items()}
snake_case : Optional[int] = errors # how to handle errors in decoding
snake_case : List[Any] = bytes_to_unicode()
snake_case : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
snake_case : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1]
snake_case : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
snake_case : Optional[int] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : int = {}
snake_case : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case : Any = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ) -> dict:
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token : str ) -> str:
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text : str ) -> List[str]:
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id( self , token : str ) -> int:
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index : int ) -> str:
        """simple docstring"""
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens : List[str] ) -> str:
        """simple docstring"""
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text : str , is_split_into_words : bool = False , **kwargs ) -> Tuple[str, dict]:
        """simple docstring"""
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation : "Conversation" ) -> List[int]:
        """simple docstring"""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to prefix a space, as is done within Blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses already contain the prefix space.
                inputs.append(text )
        full_string = ''' '''.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
        return input_ids
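# A minimal, self-contained sketch of the byte-level BPE merge loop implemented
# by `bpe` above, run against a toy merge table; the ranks below are
# illustrative and not taken from any real merges.txt.
def _toy_bpe(token, bpe_ranks):
    word = tuple(token)
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    while pairs:
        # merge the pair that was learned earliest during BPE training
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    return " ".join(word)
# "hello" first merges ("l", "l") and then ("ll", "o"), leaving "h e llo"
assert _toy_bpe("hello", {("l", "l"): 0, ("ll", "o"): 1}) == "h e llo"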
| 638
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ : List[Any] = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Dict = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[str] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
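# A minimal sketch of the deferred-import pattern that `_LazyModule` provides:
# attribute access triggers the real import via module-level `__getattr__`
# (PEP 562). The helper below is illustrative, not the transformers
# implementation itself.
import importlib
def _lazy_getattr(name, import_structure, package):
    for module_name, exported_names in import_structure.items():
        if name in exported_names:
            module = importlib.import_module("." + module_name, package)
            return getattr(module, name)
    raise AttributeError(f"module {package!r} has no attribute {name!r}")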
| 204
|
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    '''simple docstring'''
    def __init__( self ) ->None:
        '''simple docstring'''
        self.node_position = []
    def get_position( self , vertex ) ->int:
        '''simple docstring'''
        return self.node_position[vertex]
    def set_position( self , vertex , pos ) ->None:
        '''simple docstring'''
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ) ->None:
        '''simple docstring'''
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , temp_pos = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, temp_pos
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ) ->None:
        '''simple docstring'''
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ) ->None:
        '''simple docstring'''
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ) ->int:
        '''simple docstring'''
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ) ->list:
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv ,positions )
    for _ in range(1 ,len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv ,positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance ,heap.get_position(neighbor ) ,distance_tv ,positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
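# A small sanity check of the function above on a 4-vertex graph with
# illustrative weights; since all weights differ, the MST here is unique and
# its (parent, vertex) edges are (0, 1), (1, 2) and (0, 3).
def _example_minimum_spanning_tree():
    example = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4), (0, 3, 3)]:
        example[u].append([v, w])
        example[v].append([u, w])
    return sorted(prisms_algorithm(example))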
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 204
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Any = 'wav2vec2'
def __init__(self : str , a__ : Optional[int]=32 , a__ : str=768 , a__ : Tuple=12 , a__ : str=12 , a__ : Tuple=3072 , a__ : Dict="gelu" , a__ : Tuple=0.1 , a__ : List[Any]=0.1 , a__ : Dict=0.1 , a__ : int=0.0 , a__ : str=0.0 , a__ : List[Any]=0.1 , a__ : List[str]=0.1 , a__ : str=0.0_2 , a__ : List[Any]=1E-5 , a__ : Tuple="group" , a__ : Optional[int]="gelu" , a__ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , a__ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , a__ : List[str]=(10, 3, 3, 3, 3, 2, 2) , a__ : Any=False , a__ : Any=128 , a__ : Union[str, Any]=16 , a__ : int=False , a__ : str=True , a__ : Any=0.0_5 , a__ : List[Any]=10 , a__ : Optional[int]=2 , a__ : int=0.0 , a__ : Any=10 , a__ : int=0 , a__ : Dict=320 , a__ : str=2 , a__ : Dict=0.1 , a__ : Union[str, Any]=100 , a__ : List[Any]=256 , a__ : Tuple=256 , a__ : Optional[int]=0.1 , a__ : Dict="sum" , a__ : int=False , a__ : str=False , a__ : Any=256 , a__ : Union[str, Any]=(512, 512, 512, 512, 1500) , a__ : Optional[Any]=(5, 3, 3, 1, 1) , a__ : Optional[int]=(1, 2, 3, 1, 1) , a__ : Any=512 , a__ : List[str]=0 , a__ : Any=1 , a__ : Tuple=2 , a__ : int=False , a__ : List[Any]=3 , a__ : Any=2 , a__ : Any=3 , a__ : str=None , a__ : List[str]=None , **a__ : Optional[int] , ):
"""simple docstring"""
super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ )
__snake_case = hidden_size
__snake_case = feat_extract_norm
__snake_case = feat_extract_activation
__snake_case = list(a__ )
__snake_case = list(a__ )
__snake_case = list(a__ )
__snake_case = conv_bias
__snake_case = num_conv_pos_embeddings
__snake_case = num_conv_pos_embedding_groups
__snake_case = len(self.conv_dim )
__snake_case = num_hidden_layers
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_attention_heads
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = feat_proj_dropout
__snake_case = final_dropout
__snake_case = layerdrop
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = vocab_size
__snake_case = do_stable_layer_norm
__snake_case = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case = apply_spec_augment
__snake_case = mask_time_prob
__snake_case = mask_time_length
__snake_case = mask_time_min_masks
__snake_case = mask_feature_prob
__snake_case = mask_feature_length
__snake_case = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__snake_case = num_codevectors_per_group
__snake_case = num_codevector_groups
__snake_case = contrastive_logits_temperature
__snake_case = feat_quantizer_dropout
__snake_case = num_negatives
__snake_case = codevector_dim
__snake_case = proj_codevector_dim
__snake_case = diversity_loss_weight
# ctc loss
__snake_case = ctc_loss_reduction
__snake_case = ctc_zero_infinity
# adapter
__snake_case = add_adapter
__snake_case = adapter_kernel_size
__snake_case = adapter_stride
__snake_case = num_adapter_layers
__snake_case = output_hidden_size or hidden_size
__snake_case = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case = list(a__ )
__snake_case = list(a__ )
__snake_case = list(a__ )
__snake_case = xvector_output_dim
@property
def a (self : List[Any] ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
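# A quick numeric check of the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), the feature extractor maps 5 * 2**6 = 320 raw audio
# samples onto one encoder frame.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320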
| 592
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__snake_case = Vector()
def a (self : str ):
"""simple docstring"""
__snake_case = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(a__ ) , '''(0,0,0,0,0,1)''' )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = Vector([1, 2, 3, 4] )
self.assertEqual(len(a__ ) , 4 )
def a (self : Dict ):
"""simple docstring"""
__snake_case = Vector([1, 2] )
__snake_case = Vector([1, 2, 3, 4, 5] )
__snake_case = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__snake_case = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = Vector([1, 2, 3] )
__snake_case = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = Vector([1, 2, 3] )
__snake_case = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def a (self : Dict ):
"""simple docstring"""
__snake_case = Vector([1, 2, 3] )
__snake_case = Vector([2, -1, 4] ) # for test of dot product
__snake_case = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def a (self : Optional[int] ):
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def a (self : Optional[Any] ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = Vector([1, 2, 3] )
__snake_case = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , a__ , a__ ) ) , '''(3,4,7)''' )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = Vector([1, 0, 0, 0, 0, 0] )
__snake_case = x.copy()
self.assertEqual(str(a__ ) , str(a__ ) )
def a (self : Dict ):
"""simple docstring"""
__snake_case = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(a__ ) , '''(0,1,0)''' )
def a (self : int ):
"""simple docstring"""
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(a__ ) )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__snake_case = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(a__ , a__ ) )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__snake_case = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(a__ , a__ ) )
def a (self : str ):
"""simple docstring"""
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__snake_case = Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def a (self : Any ):
"""simple docstring"""
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(a__ ) )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__snake_case = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__snake_case = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def a (self : Optional[int] ):
"""simple docstring"""
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
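# A hand check of the determinant asserted above for
# [[1, 2, 3], [2, 4, 5], [6, 7, 8]], via cofactor expansion along row one:
# 1*(4*8 - 5*7) - 2*(2*8 - 5*6) + 3*(2*7 - 4*6) = -3 + 28 - 30 = -5.
assert 1 * (4 * 8 - 5 * 7) - 2 * (2 * 8 - 5 * 6) + 3 * (2 * 7 - 4 * 6) == -5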
if __name__ == "__main__":
unittest.main()
| 592
| 1
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
def _UpperCamelCase (a__ :int , a__ :Dict ):
"""simple docstring"""
try:
with open(a__ , """rb""" ) as flax_state_f:
UpperCamelCase__ = from_bytes(a__ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(a__ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(a__ , a__ )
def _UpperCamelCase (a__ :Dict , a__ :int ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda a__ : x.dtype == jnp.bfloataa , a__ ) ).values()
if any(a__ ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
UpperCamelCase__ = jax.tree_util.tree_map(
lambda a__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , a__ )
UpperCamelCase__ = """"""
UpperCamelCase__ = flatten_dict(a__ , sep=""".""" )
UpperCamelCase__ = pt_model.state_dict()
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
UpperCamelCase__ = flax_key_tuple_array[:-1] + ["""weight"""]
UpperCamelCase__ = jnp.transpose(a__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
UpperCamelCase__ = flax_key_tuple_array[:-1] + ["""weight"""]
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
UpperCamelCase__ = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(a__ ):
UpperCamelCase__ = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
UpperCamelCase__ = """.""".join(a__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(a__ ) if not isinstance(a__ , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(a__ )
# remove from missing keys
missing_keys.remove(a__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(a__ )
pt_model.load_state_dict(a__ )
# re-transform missing_keys to list
UpperCamelCase__ = list(a__ )
if len(a__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(a__ ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
""" use it for predictions and inference.""" )
return pt_model
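# A minimal shape check of the two kernel-layout rules applied above: Flax
# stores conv kernels as (H, W, in, out) while PyTorch expects (out, in, H, W),
# hence the (3, 2, 0, 1) transpose; dense kernels only need a plain transpose.
# The sizes below are illustrative.
def _layout_sanity_check():
    conv_kernel = np.zeros((3, 3, 16, 32))  # Flax: (H, W, in, out)
    assert np.transpose(conv_kernel, (3, 2, 0, 1)).shape == (32, 16, 3, 3)
    dense_kernel = np.zeros((768, 3072))  # Flax: (in, out)
    assert dense_kernel.T.shape == (3072, 768)  # PyTorch: (out, in)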
| 548
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function :str , starting_point :complex , variable :str = "x" , precision :float = 10**-10 , multiplicity :int = 1 , ):
    """simple docstring"""
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("""Could not find root""" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
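# A dependency-free sketch of the same iteration for f(x) = x**2 - 2 with
# f'(x) = 2*x, converging to sqrt(2); the starting point and tolerance below
# are illustrative.
def _sqrt2_by_newton(guess=1.0, precision=10**-10):
    while True:
        next_guess = guess - (guess * guess - 2) / (2 * guess)
        if abs(next_guess - guess) < precision:
            return next_guess
        guess = next_guess
assert abs(_sqrt2_by_newton() - 2**0.5) < 10**-9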
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 548
| 1
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase_ (self ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.dummy_uncond_unet
__UpperCAmelCase = ScoreSdeVeScheduler()
__UpperCAmelCase = ScoreSdeVePipeline(unet=lowercase__ , scheduler=lowercase__ )
sde_ve.to(lowercase__ )
sde_ve.set_progress_bar_config(disable=lowercase__ )
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=lowercase__ ).images
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=lowercase__ , return_dict=lowercase__ )[
0
]
__UpperCAmelCase = image[0, -3:, -3:, -1]
__UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = '''google/ncsnpp-church-256'''
__UpperCAmelCase = UNetaDModel.from_pretrained(lowercase__ )
__UpperCAmelCase = ScoreSdeVeScheduler.from_pretrained(lowercase__ )
__UpperCAmelCase = ScoreSdeVePipeline(unet=lowercase__ , scheduler=lowercase__ )
sde_ve.to(lowercase__ )
sde_ve.set_progress_bar_config(disable=lowercase__ )
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=lowercase__ ).images
__UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCAmelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
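# A minimal sketch of the reproducibility idiom used in the tests above:
# re-seeding before each call makes two sampling runs bit-identical, which is
# what allows comparing `image` and `image_from_tuple` elementwise.
def _seeding_sanity_check():
    first = torch.randn(3, generator=torch.manual_seed(0))
    second = torch.randn(3, generator=torch.manual_seed(0))
    assert torch.equal(first, second)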
| 303
|
def binary_multiply( a , b ) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply( a , b , c ) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
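# A worked trace of the shift-and-add loop above: for 13 * 11, b = 0b1011, so
# res accumulates a, 2a and 8a (13 + 26 + 104), giving 143; the modular variant
# reduces every partial sum, so it returns 143 % 7.
assert binary_multiply(13, 11) == 143
assert binary_mod_multiply(13, 11, 7) == 143 % 7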
| 303
| 1
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCAmelCase_ : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : str , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str=False , ) -> int:
output_path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_UpperCAmelCase , _UpperCAmelCase , f=output_path.as_posix() , input_names=_UpperCAmelCase , output_names=_UpperCAmelCase , dynamic_axes=_UpperCAmelCase , do_constant_folding=_UpperCAmelCase , use_external_data_format=_UpperCAmelCase , enable_onnx_checker=_UpperCAmelCase , opset_version=_UpperCAmelCase , )
else:
export(
_UpperCAmelCase , _UpperCAmelCase , f=output_path.as_posix() , input_names=_UpperCAmelCase , output_names=_UpperCAmelCase , dynamic_axes=_UpperCAmelCase , do_constant_folding=_UpperCAmelCase , opset_version=_UpperCAmelCase , )
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : int = False ) -> int:
UpperCamelCase :int = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
UpperCamelCase :int = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
UpperCamelCase :Optional[int] = """cpu"""
UpperCamelCase :List[Any] = Path(_UpperCAmelCase )
# VAE DECODER
UpperCamelCase :Tuple = AutoencoderKL.from_pretrained(model_path + """/vae""" )
UpperCamelCase :List[str] = vae_decoder.config.latent_channels
# forward only through the decoder part
UpperCamelCase :int = vae_decoder.decode
onnx_export(
_UpperCAmelCase , model_args=(
torch.randn(1 , _UpperCAmelCase , 25 , 25 ).to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=_UpperCAmelCase , )
del vae_decoder
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
UpperCAmelCase_ : str = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
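# A minimal sketch of the `dynamic_axes` idea used by `onnx_export` above: the
# named axes stay symbolic in the exported graph, so the decoder accepts any
# batch size and latent resolution. The tiny module and file name below are
# illustrative, not part of the conversion script.
def _export_dynamic_axes_sketch():
    class TinyDecoder(torch.nn.Module):
        def forward(self, latent_sample):
            return torch.nn.functional.interpolate(latent_sample, scale_factor=2.0)
    export(
        TinyDecoder(),
        (torch.randn(1, 4, 25, 25),),
        f="tiny_decoder.onnx",
        input_names=["latent_sample"],
        output_names=["sample"],
        dynamic_axes={"latent_sample": {0: "batch", 2: "height", 3: "width"}},
        opset_version=14,
    )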
| 716
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : jnp.ndarray
snake_case__ : jnp.ndarray
class _SCREAMING_SNAKE_CASE ( nn.Module ):
snake_case__ : int
snake_case__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
snake_case__ : jnp.dtype = jnp.floataa
def _A ( self : Any ):
UpperCamelCase :Union[str, Any] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCamelCase :List[str] = []
for i in range(len(self.block_out_channels ) - 1 ):
UpperCamelCase :Optional[Any] = self.block_out_channels[i]
UpperCamelCase :List[Any] = self.block_out_channels[i + 1]
UpperCamelCase :List[Any] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCamelCase )
UpperCamelCase :List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCamelCase )
UpperCamelCase :Tuple = blocks
UpperCamelCase :Optional[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Dict , __lowerCamelCase : Dict ):
UpperCamelCase :Tuple = self.conv_in(__lowerCamelCase )
UpperCamelCase :Optional[Any] = nn.silu(__lowerCamelCase )
for block in self.blocks:
UpperCamelCase :Tuple = block(__lowerCamelCase )
UpperCamelCase :List[str] = nn.silu(__lowerCamelCase )
UpperCamelCase :Dict = self.conv_out(__lowerCamelCase )
return embedding
@flax_register_to_config
class _SCREAMING_SNAKE_CASE ( nn.Module , _a , _a ):
snake_case__ : int = 3_2
snake_case__ : int = 4
snake_case__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
snake_case__ : Union[bool, Tuple[bool]] = False
snake_case__ : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
snake_case__ : int = 2
snake_case__ : Union[int, Tuple[int]] = 8
snake_case__ : Optional[Union[int, Tuple[int]]] = None
snake_case__ : int = 1_2_8_0
snake_case__ : float = 0.0
snake_case__ : bool = False
snake_case__ : jnp.dtype = jnp.floataa
snake_case__ : bool = True
snake_case__ : int = 0
snake_case__ : str = "rgb"
snake_case__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def _A ( self : int , __lowerCamelCase : jax.random.KeyArray ):
# init input tensors
UpperCamelCase :int = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCamelCase :Union[str, Any] = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
UpperCamelCase :int = jnp.ones((1,) , dtype=jnp.intaa )
UpperCamelCase :Tuple = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
UpperCamelCase :Tuple = (1, 3, self.sample_size * 8, self.sample_size * 8)
UpperCamelCase :Tuple = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
UpperCamelCase , UpperCamelCase :int = jax.random.split(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["params"]
def _A ( self : int ):
UpperCamelCase :Dict = self.block_out_channels
UpperCamelCase :Tuple = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCamelCase :List[Any] = self.num_attention_heads or self.attention_head_dim
# input
UpperCamelCase :Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
UpperCamelCase :Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
UpperCamelCase :Tuple = FlaxTimestepEmbedding(__lowerCamelCase , dtype=self.dtype )
UpperCamelCase :List[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
UpperCamelCase :Union[str, Any] = self.only_cross_attention
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :str = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCamelCase :int = []
UpperCamelCase :str = []
UpperCamelCase :str = block_out_channels[0]
UpperCamelCase :Optional[Any] = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
for i, down_block_type in enumerate(self.down_block_types ):
UpperCamelCase :List[str] = output_channel
UpperCamelCase :Optional[Any] = block_out_channels[i]
UpperCamelCase :Tuple = i == len(__lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCamelCase :List[Any] = FlaxCrossAttnDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
UpperCamelCase :List[Any] = FlaxDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowerCamelCase )
for _ in range(self.layers_per_block ):
UpperCamelCase :List[Any] = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
if not is_final_block:
UpperCamelCase :str = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
UpperCamelCase :Optional[Any] = down_blocks
UpperCamelCase :Optional[Any] = controlnet_down_blocks
# mid
UpperCamelCase :str = block_out_channels[-1]
UpperCamelCase :Dict = FlaxUNetMidBlockaDCrossAttn(
in_channels=__lowerCamelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
UpperCamelCase :List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : float = 1.0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False , ):
UpperCamelCase :Dict = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
UpperCamelCase :List[Any] = jnp.flip(__lowerCamelCase , axis=1 )
# 1. time
if not isinstance(__lowerCamelCase , jnp.ndarray ):
UpperCamelCase :Any = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCamelCase :Any = timesteps.astype(dtype=jnp.floataa )
UpperCamelCase :Optional[Any] = jnp.expand_dims(__lowerCamelCase , 0 )
UpperCamelCase :Optional[Any] = self.time_proj(__lowerCamelCase )
UpperCamelCase :Any = self.time_embedding(__lowerCamelCase )
# 2. pre-process
UpperCamelCase :int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
UpperCamelCase :Dict = self.conv_in(__lowerCamelCase )
UpperCamelCase :Any = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
UpperCamelCase :Optional[int] = self.controlnet_cond_embedding(__lowerCamelCase )
sample += controlnet_cond
# 3. down
UpperCamelCase :int = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase , UpperCamelCase :Optional[Any] = down_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
else:
UpperCamelCase , UpperCamelCase :Union[str, Any] = down_block(__lowerCamelCase , __lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
UpperCamelCase :List[str] = self.mid_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
        # 5. controlnet blocks
UpperCamelCase :str = ()
for down_block_res_sample, controlnet_block in zip(__lowerCamelCase , self.controlnet_down_blocks ):
UpperCamelCase :Any = controlnet_block(__lowerCamelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
UpperCamelCase :Optional[Any] = controlnet_down_block_res_samples
UpperCamelCase :str = self.controlnet_mid_block(__lowerCamelCase )
# 6. scaling
UpperCamelCase :str = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__lowerCamelCase , mid_block_res_sample=__lowerCamelCase )
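# A small numeric sketch of step 6 above: `conditioning_scale` uniformly
# rescales every residual the ControlNet hands back to the UNet; the shapes
# and the 0.5 scale below are illustrative.
def _conditioning_scale_sketch():
    residuals = (jnp.ones((1, 8, 8, 4)), jnp.ones((1, 4, 4, 8)))
    scaled = [sample * 0.5 for sample in residuals]
    assert float(scaled[0].max()) == 0.5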
| 590
| 0
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '''__DUMMY_TRANSFORMERS_USER__'''
CI_HUB_USER_FULL_NAME = '''Dummy User'''
CI_HUB_USER_TOKEN = '''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'''
CI_HUB_ENDPOINT = '''https://hub-ci.huggingface.co'''
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
CI_HUB_TOKEN_PATH = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , _UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , _UpperCamelCase )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , _UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , _UpperCamelCase )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
HfFolder.save_token(_UpperCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
"""simple docstring"""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
"""simple docstring"""
def _cleanup_repo(_UpperCamelCase ):
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
@contextmanager
def _temporary_repo(_UpperCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(_UpperCamelCase )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Any = f'''repo_txt_data-{int(time.time() * 10E3 )}'''
snake_case_ : Optional[int] = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' , private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase , path_or_fileobj=str(_UpperCamelCase ) , path_in_repo='''data/text_data.txt''' , repo_id=_UpperCamelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : str = f'''repo_zipped_txt_data-{int(time.time() * 10E3 )}'''
snake_case_ : Optional[Any] = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' , private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase , path_or_fileobj=str(_UpperCamelCase ) , path_in_repo='''data.zip''' , repo_id=_UpperCamelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : int = f'''repo_zipped_img_data-{int(time.time() * 10E3 )}'''
snake_case_ : List[str] = f'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' , private=_UpperCamelCase )
hf_api.upload_file(
token=_UpperCamelCase , path_or_fileobj=str(_UpperCamelCase ) , path_in_repo='''data.zip''' , repo_id=_UpperCamelCase , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCamelCase , token=_UpperCamelCase , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
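# A minimal sketch of how the cleanup-factory fixtures above are meant to be
# used from a test; the fixture names (`temporary_repo`, `hf_api`, `hf_token`)
# follow the upstream datasets test suite, and the repo name is illustrative.
def test_temporary_repo_sketch(temporary_repo, hf_api, hf_token):
    repo_id = f"{CI_HUB_USER}/tmp-repo-{int(time.time() * 10E3)}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
    with temporary_repo(repo_id):
        pass  # the repo exists here and is deleted when the block exits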
| 60
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger()
@dataclass
class _UpperCAmelCase :
lowerCamelCase_ : nn.Module
lowerCamelCase_ : List[nn.Module] = field(default_factory=lowercase )
lowerCamelCase_ : list = field(default_factory=lowercase )
def _snake_case ( self : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tensor , UpperCAmelCase : Tensor):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = len(list(m.modules())) == 1 or isinstance(UpperCAmelCase , nn.Convad) or isinstance(UpperCAmelCase , nn.BatchNormad)
if has_not_submodules:
self.traced.append(UpperCAmelCase)
def __call__( self : Optional[int] , UpperCAmelCase : Tensor):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(UpperCAmelCase)
[x.remove() for x in self.handles]
return self
@property
def _snake_case ( self : Optional[Any]):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCAmelCase: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class _UpperCAmelCase :
lowerCamelCase_ : nn.Module
lowerCamelCase_ : nn.Module
lowerCamelCase_ : int = 1
lowerCamelCase_ : List = field(default_factory=lowercase )
lowerCamelCase_ : List = field(default_factory=lowercase )
lowerCamelCase_ : bool = True
def __call__( self : List[str] , UpperCAmelCase : Tensor):
SCREAMING_SNAKE_CASE_ :str = Tracker(self.dest)(UpperCAmelCase).parametrized
SCREAMING_SNAKE_CASE_ :Optional[int] = Tracker(self.src)(UpperCAmelCase).parametrized
SCREAMING_SNAKE_CASE_ :str = list(filter(lambda UpperCAmelCase: type(UpperCAmelCase) not in self.src_skip , UpperCAmelCase))
SCREAMING_SNAKE_CASE_ :Tuple = list(filter(lambda UpperCAmelCase: type(UpperCAmelCase) not in self.dest_skip , UpperCAmelCase))
if len(UpperCAmelCase) != len(UpperCAmelCase) and self.raise_if_mismatch:
raise Exception(
F"Numbers of operations are different. Source module has {len(UpperCAmelCase)} operations while"
F" destination module has {len(UpperCAmelCase)}.")
for dest_m, src_m in zip(UpperCAmelCase , UpperCAmelCase):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(F"Transfered from={src_m} to={dest_m}")
class _UpperCAmelCase ( nn.Module ):
def __init__( self : Optional[Any] , UpperCAmelCase : nn.Module):
super().__init__()
SCREAMING_SNAKE_CASE_ :List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("conv1", model.stem))
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block"), F"Unexpected layer name {k}"
SCREAMING_SNAKE_CASE_ :Any = len(UpperCAmelCase) + 1
feature_blocks.append((F"res{block_index}", v))
SCREAMING_SNAKE_CASE_ :List[str] = nn.ModuleDict(UpperCAmelCase)
def _snake_case ( self : List[Any] , UpperCAmelCase : Tensor):
return get_trunk_forward_outputs(
UpperCAmelCase , out_feat_keys=UpperCAmelCase , feature_blocks=self._feature_blocks , )
class _UpperCAmelCase ( lowercase ):
def _snake_case ( self : Union[str, Any] , UpperCAmelCase : str):
SCREAMING_SNAKE_CASE_ :Optional[Any] = x.split("-")
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])
def __getitem__( self : str , UpperCAmelCase : str):
# default to timm!
if x not in self:
SCREAMING_SNAKE_CASE_ :List[str] = self.convert_name_to_timm(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Tuple = partial(lambda: (timm.create_model(UpperCAmelCase , pretrained=UpperCAmelCase).eval(), None))
else:
SCREAMING_SNAKE_CASE_ :Tuple = super().__getitem__(UpperCAmelCase)
return val
class _UpperCAmelCase ( lowercase ):
def __getitem__( self : Any , UpperCAmelCase : str):
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = RegNetModel
else:
SCREAMING_SNAKE_CASE_ :Optional[int] = RegNetForImageClassification
return val
def lowercase ( a , a , a ):
'''simple docstring'''
for from_key, to_key in keys:
SCREAMING_SNAKE_CASE_ :List[Any] = from_state_dict[from_key].clone()
print(F"Copied key={from_key} to={to_key}" )
return to_state_dict
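# A minimal dict-level sketch of the renaming copy performed above; the key
# pair mirrors the seer -> in1k head mapping used later in this script, and
# the tensor sizes are illustrative.
def _manual_head_copy_sketch():
    from_state_dict = {"0.clf.0.weight": torch.zeros(1000, 2048)}
    keys = [("0.clf.0.weight", "classifier.1.weight")]
    to_state_dict = {}
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
    assert to_state_dict["classifier.1.weight"].shape == (1000, 2048)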
def lowercase ( a , a , a , a , a , a = True , ):
'''simple docstring'''
print(F"Converting {name}..." )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Any = from_model_func()
SCREAMING_SNAKE_CASE_ :Optional[Any] = our_model_func(a ).eval()
SCREAMING_SNAKE_CASE_ :Union[str, Any] = ModuleTransfer(src=a , dest=a , raise_if_mismatch=a )
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.randn((1, 3, 224, 224) )
module_transfer(a )
if from_state_dict is not None:
SCREAMING_SNAKE_CASE_ :Tuple = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE_ :str = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
SCREAMING_SNAKE_CASE_ :Optional[Any] = manually_copy_vissl_head(a , our_model.state_dict() , a )
our_model.load_state_dict(a )
SCREAMING_SNAKE_CASE_ :str = our_model(a , output_hidden_states=a )
SCREAMING_SNAKE_CASE_ :Dict = (
our_outputs.logits if isinstance(a , a ) else our_outputs.last_hidden_state
)
SCREAMING_SNAKE_CASE_ :Union[str, Any] = from_model(a )
SCREAMING_SNAKE_CASE_ :int = from_output[-1] if type(a ) is list else from_output
    # now, since we don't want to use any config files, the vissl seer model doesn't actually have a head, so we just check the last hidden state
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = our_outputs.hidden_states[-1]
assert torch.allclose(a , a ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=a , )
SCREAMING_SNAKE_CASE_ :List[Any] = 224 if "seer" not in name else 384
# we can use the convnext one
SCREAMING_SNAKE_CASE_ :Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=a )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=a , )
print(F"Pushed {name}" )
def lowercase ( a , a = None , a = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Union[str, Any] = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE_ :str = 1000
SCREAMING_SNAKE_CASE_ :List[Any] = (1, num_labels)
SCREAMING_SNAKE_CASE_ :Dict = "huggingface/label-files"
SCREAMING_SNAKE_CASE_ :Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE_ :Any = json.load(open(cached_download(hf_hub_url(a , a , repo_type="dataset" ) ) , "r" ) )
SCREAMING_SNAKE_CASE_ :Optional[int] = {int(a ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ :Optional[Any] = idalabel
SCREAMING_SNAKE_CASE_ :List[str] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ :int = partial(a , num_labels=a , idalabel=a , labelaid=a )
SCREAMING_SNAKE_CASE_ :str = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
SCREAMING_SNAKE_CASE_ :List[str] = NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE_ :Tuple = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(a , a ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE_ :int = torch.hub.load_state_dict_from_url(a , model_dir=str(a ) , map_location="cpu" )
SCREAMING_SNAKE_CASE_ :List[str] = model_func()
# check if we have a head, if yes add it
SCREAMING_SNAKE_CASE_ :Union[str, Any] = files["classy_state_dict"]["base_model"]["model"]
SCREAMING_SNAKE_CASE_ :List[str] = model_state_dict["trunk"]
model.load_state_dict(a )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE_ :Union[str, Any] = partial(
a , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE_ :int = partial(
a , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE_ :int = partial(
a , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE_ :Tuple = partial(
a , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
SCREAMING_SNAKE_CASE_ :Optional[int] = partial(
a , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE_ :int = partial(
a , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE_ :List[str] = partial(
a , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE_ :List[Any] = partial(
a , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
a , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , a , a , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
a , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , a , a , a , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 631
| 0
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
__UpperCAmelCase : Union[str, Any] = "scheduler_config.json"
class _snake_case ( _A ):
_A = 1
_A = 2
_A = 3
_A = 4
_A = 5
_A = 6
_A = 7
_A = 8
_A = 9
_A = 10
_A = 11
_A = 12
_A = 13
_A = 14
@dataclass
class _snake_case ( _A ):
_A = 42
class _snake_case :
_A = SCHEDULER_CONFIG_NAME
_A = []
_A = True
@classmethod
def lowerCAmelCase_ ( cls ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase=False ,**UpperCamelCase ,) -> Tuple:
snake_case__ :str = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase ,subfolder=UpperCamelCase ,return_unused_kwargs=UpperCamelCase ,return_commit_hash=UpperCamelCase ,**UpperCamelCase ,)
return cls.from_config(UpperCamelCase ,return_unused_kwargs=UpperCamelCase ,**UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = False ,**UpperCamelCase ) -> Union[str, Any]:
self.save_config(save_directory=UpperCamelCase ,push_to_hub=UpperCamelCase ,**UpperCamelCase )
@property
def lowerCAmelCase_ ( self ) -> Optional[Any]:
return self._get_compatibles()
@classmethod
def lowerCAmelCase_ ( cls ) -> str:
snake_case__ :Dict = list(set([cls.__name__] + cls._compatibles ) )
snake_case__ :Optional[int] = importlib.import_module(__name__.split("." )[0] )
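# resolve each compatible scheduler name to its class on the top-level package (diffusers, in the original source)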
snake_case__ :int = [
getattr(UpperCamelCase ,UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase ,UpperCamelCase )
]
return compatible_classes
| 700
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> Any:
'''simple docstring'''
snake_case__ :Optional[Any] = b.T
snake_case__ :Optional[Any] = np.sum(np.square(__snake_case ) , axis=1 )
snake_case__ :Tuple = np.sum(np.square(__snake_case ) , axis=0 )
snake_case__ :Union[str, Any] = np.matmul(__snake_case , __snake_case )
snake_case__ :Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :]
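# quick sanity check of the expansion ||a_i - b_j||^2 = ||a_i||^2 - 2 a_i.b_j + ||b_j||^2
# (values are illustrative, not from the source): a=[[0,0,0],[1,2,2]], b=[[1,0,0]] gives d=[[1.0],[8.0]]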
return d
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : int ) -> Any:
'''simple docstring'''
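# map every RGB pixel to the index of its nearest cluster centroid; the result
# is one palette id per pixel, reshaped back to (height, width) by the caller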
snake_case__ :Optional[Any] = x.reshape(-1 , 3 )
snake_case__ :List[str] = squared_euclidean_distance(__snake_case , __snake_case )
return np.argmin(__snake_case , axis=1 )
class _snake_case ( _A ):
_A = ['pixel_values']
def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None:
super().__init__(**UpperCamelCase )
snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256}
snake_case__ :str = get_size_dict(UpperCamelCase )
snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None
snake_case__ :str = do_resize
snake_case__ :List[str] = size
snake_case__ :List[Any] = resample
snake_case__ :Union[str, Any] = do_normalize
snake_case__ :int = do_color_quantize
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray:
snake_case__ :List[str] = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray:
snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase )
snake_case__ :List[Any] = image - 1
return image
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image:
snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
snake_case__ :int = size if size is not None else self.size
snake_case__ :Tuple = get_size_dict(UpperCamelCase )
snake_case__ :str = resample if resample is not None else self.resample
snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ :List[Any] = clusters if clusters is not None else self.clusters
snake_case__ :str = np.array(UpperCamelCase )
snake_case__ :int = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images]
if do_normalize:
snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images]
if do_color_quantize:
snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ :Union[str, Any] = np.array(UpperCamelCase )
snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ :List[Any] = images.shape[0]
snake_case__ :str = images.reshape(UpperCamelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ :Any = list(UpperCamelCase )
else:
snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images]
snake_case__ :List[str] = {"input_ids": images}
return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
| 57
| 0
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A:
'''simple docstring'''
def __init__( self : Dict , A_ : List[str] , A_ : List[str]=3 , A_ : Dict=32 , A_ : Union[str, Any]=3 , A_ : Any=10 , A_ : List[Any]=[10, 20, 30, 40] , A_ : int=[1, 1, 2, 1] , A_ : List[str]=True , A_ : Tuple=True , A_ : Tuple="relu" , A_ : str=3 , A_ : int=None , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embeddings_size
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_act
lowerCamelCase_ = num_labels
lowerCamelCase_ = scope
lowerCamelCase_ = len(A_ )
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : str ) -> Optional[int]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def a__ ( self : Optional[Any] , A_ : Optional[Any] , A_ : Union[str, Any] , A_ : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = TFRegNetModel(config=A_ )
lowerCamelCase_ = model(A_ , training=A_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self : int , A_ : Tuple , A_ : int , A_ : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFRegNetForImageClassification(A_ )
lowerCamelCase_ = model(A_ , labels=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFRegNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , has_text_modality=A_ )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(A_ : List[str] , A_ : List[str] , A_ : List[Any] ):
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = model(**self._prepare_for_class(A_ , A_ ) , training=A_ )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(A_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase_ = layer_type
lowerCamelCase_ = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(A_ , A_ , A_ )
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(A_ : Union[str, Any] , A_ : Optional[Any] , A_ : Optional[Any] , A_ : Dict={} ):
lowerCamelCase_ = model(A_ , return_dict=A_ , **A_ )
lowerCamelCase_ = model(A_ , return_dict=A_ , **A_ ).to_tuple()
def recursive_check(A_ : Any , A_ : Optional[Any] ):
if isinstance(A_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(A_ , A_ ):
recursive_check(A_ , A_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(A_ , A_ ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(A_ , A_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = self._prepare_for_class(A_ , A_ )
lowerCamelCase_ = self._prepare_for_class(A_ , A_ )
check_equivalence(A_ , A_ , A_ )
lowerCamelCase_ = self._prepare_for_class(A_ , A_ , return_labels=A_ )
lowerCamelCase_ = self._prepare_for_class(A_ , A_ , return_labels=A_ )
check_equivalence(A_ , A_ , A_ )
lowerCamelCase_ = self._prepare_for_class(A_ , A_ )
lowerCamelCase_ = self._prepare_for_class(A_ , A_ )
check_equivalence(A_ , A_ , A_ , {'output_hidden_states': True} )
lowerCamelCase_ = self._prepare_for_class(A_ , A_ , return_labels=A_ )
lowerCamelCase_ = self._prepare_for_class(A_ , A_ , return_labels=A_ )
check_equivalence(A_ , A_ , A_ , {'output_hidden_states': True} )
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def a__ ( self : str ) -> str:
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFRegNetModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=A_ , return_tensors='tf' )
# forward pass
lowerCamelCase_ = model(**A_ , training=A_ )
# verify the logits
lowerCamelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , A_ , atol=1E-4 )
| 70
|
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : str ):
'''simple docstring'''
if len(lowercase ) != len(lowercase ):
raise ValueError('String lengths must match!' )
lowerCamelCase_ = 0
for chara, chara in zip(lowercase , lowercase ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
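# illustrative usage (strings are assumptions, not from the source):
# _SCREAMING_SNAKE_CASE("karolin", "kathrin") -> 3, the Hamming distance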
| 70
| 1
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : int ) ->list:
A__ : Dict = word.split()
def justify(UpperCAmelCase__ : list, UpperCAmelCase__ : int, UpperCAmelCase__ : int ) -> str:
A__ : Any = max_width - width
A__ : List[Any] = len(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) == 1:
# if there is only one word in the line,
# just insert overall_spaces_count spaces after it to pad out the line
return line[0] + " " * overall_spaces_count
else:
A__ : List[str] = words_count - 1
# num_spaces_between_words_list[i] tells you how many
# spaces to insert after the word at line[i]
A__ : Dict = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
A__ : Any = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute the leftover spaces round-robin, starting from the leftmost words
for i in range(UpperCAmelCase__ ):
num_spaces_between_words_list[i] += 1
A__ : Tuple = []
for i in range(UpperCAmelCase__ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCAmelCase__ )
A__ : int = []
A__ : list[str] = []
A__ : Any = 0
for word in words:
if width + len(UpperCAmelCase__ ) + len(UpperCAmelCase__ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of the lengths of all words so far (excluding spaces)
# len(word) = length of the current word
# len(line) = number of mandatory single spaces between the words so far
line.append(UpperCAmelCase__ )
width += len(UpperCAmelCase__ )
else:
# justify the line and add it to result
answer.append(justify(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) )
# reset new line and new width
A__ , A__ : Optional[int] = [word], len(UpperCAmelCase__ )
A__ : int = max_width - width - len(UpperCAmelCase__ )
answer.append(""" """.join(UpperCAmelCase__ ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
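# illustrative call (sentence and width are assumptions, not from the source):
# _lowerCAmelCase("This is an example of text justification.", 16) returns
# ["This    is    an", "example  of text", "justification.  "]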
| 498
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->str:
A__ : Tuple = {}
A__ : Union[str, Any] = tokenizer(example["""content"""], truncation=UpperCAmelCase__ )["""input_ids"""]
A__ : Any = len(example["""content"""] ) / len(output["""input_ids"""] )
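# the ratio above is characters per token, a rough gauge of how well the
# tokenizer compresses this sample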
return output
A_ = HfArgumentParser(PretokenizationArguments)
A_ = parser.parse_args()
if args.num_workers is None:
A_ = multiprocessing.cpu_count()
A_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
A_ = time.time()
A_ = load_dataset(args.dataset_name, split='''train''')
print(F'Dataset loaded in {time.time()-t_start:.2f}s')
A_ = time.time()
A_ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
A_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
| 498
| 1
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase__ : Tuple = [
# replace the left string with the right string to get the relevant state_dict key (the state dict is identical to bart's)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def __UpperCAmelCase ( lowerCamelCase_ : Any ) -> List[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE_ : Optional[Any] = k.replace(lowerCamelCase_ , lowerCamelCase_ )
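# e.g. a hypothetical tf key "memory_attention/output_proj/kernel" ends up
# rewritten as "encoder_attn.out_proj.weight"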
return k
def __UpperCAmelCase ( lowerCamelCase_ : dict , lowerCamelCase_ : dict ) -> PegasusForConditionalGeneration:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DEFAULTS.copy()
cfg_kwargs.update(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = PegasusConfig(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = PegasusForConditionalGeneration(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE_ : Dict = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE_ : str = rename_state_dict_key(lowerCamelCase_ )
if new_k not in sd:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
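# tf stores dense/projection kernels transposed relative to torch nn.Linear weights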
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE_ : str = v.T
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(lowerCamelCase_ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE_ : Dict = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE_ : int = mapping['shared.weight']
SCREAMING_SNAKE_CASE_ : Optional[int] = mapping['shared.weight']
SCREAMING_SNAKE_CASE_ : Any = {k: torch.zeros_like(lowerCamelCase_ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = torch_model.model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def __UpperCAmelCase ( lowerCamelCase_ : List[str]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = tf.train.list_variables(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : List[str] = ['Adafactor', 'global_step']
for name, shape in tqdm(lowerCamelCase_ , desc='converting tf checkpoint to dict' ):
SCREAMING_SNAKE_CASE_ : List[str] = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE_ : str = tf.train.load_variable(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = array
return tf_weights
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ).parent.name
SCREAMING_SNAKE_CASE_ : int = task_specific_params[F'summarization_{dataset}']['max_position_embeddings']
SCREAMING_SNAKE_CASE_ : str = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=lowerCamelCase_ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(lowerCamelCase_ )
# convert model
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_tf_weights_as_numpy(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = task_specific_params[F'summarization_{dataset}']
if dataset == "large":
SCREAMING_SNAKE_CASE_ : Tuple = task_specific_params
SCREAMING_SNAKE_CASE_ : Any = convert_pegasus(lowerCamelCase_ , lowerCamelCase_ )
torch_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(lowerCamelCase_ , Path(lowerCamelCase_ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
UpperCamelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase__ : List[Any] = parser.parse_args()
if args.save_dir is None:
UpperCamelCase__ : Optional[Any] = Path(args.tf_ckpt_path).parent.name
UpperCamelCase__ : List[str] = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 105
|
import os
import sys
import unittest
UpperCamelCase__ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCamelCase__ : Tuple = os.path.join(git_repo_path, '''src''', '''transformers''')
UpperCamelCase__ : List[Any] = '''
{0} = None
'''
UpperCamelCase__ : str = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
UpperCamelCase__ : List[Any] = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(snake_case__ ,'tokenizers' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(snake_case__ ,'tensorflow_text' )
SCREAMING_SNAKE_CASE_ : int = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(snake_case__ ,'sentencepiece_and_tokenizers' )
SCREAMING_SNAKE_CASE_ : str = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(snake_case__ ,'sentencepiece_and_tensorflow_text' )
SCREAMING_SNAKE_CASE_ : Tuple = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(snake_case__ ,'sentencepiece_and_tokenizers_and_vision' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('torch' ,snake_case__ )
self.assertIn('tensorflow_text' ,snake_case__ )
self.assertIn('sentencepiece_and_tokenizers' ,snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' ,objects['torch'] )
self.assertIn('TFBertModel' ,objects['tf'] )
self.assertIn('FlaxBertModel' ,objects['flax'] )
self.assertIn('BertModel' ,objects['torch'] )
self.assertIn('TFBertTokenizer' ,objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' ,objects['sentencepiece_and_tokenizers'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = create_dummy_object('CONSTANT' ,'\'torch\'' )
self.assertEqual(snake_case__ ,'\nCONSTANT = None\n' )
SCREAMING_SNAKE_CASE_ : Dict = create_dummy_object('function' ,'\'torch\'' )
self.assertEqual(
snake_case__ ,'\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
SCREAMING_SNAKE_CASE_ : List[str] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
SCREAMING_SNAKE_CASE_ : Dict = create_dummy_object('FakeClass' ,'\'torch\'' )
self.assertEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
SCREAMING_SNAKE_CASE_ : Dict = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] ,snake_case__ )
| 105
| 1
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = 3
snake_case_ : str = 2_5_0
snake_case_ : Union[str, Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ )
snake_case_ : List[str] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length
return input_ids, scores
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : Optional[int] = self._get_tensors(5 )
snake_case_ : List[str] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ , snake_case_ : List[Any] = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ , snake_case_ : Dict = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = MaxLengthCriteria(max_length=1_0 )
snake_case_ , snake_case_ : Optional[int] = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ , snake_case_ : Dict = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ , snake_case_ : Any = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
snake_case_ , snake_case_ : Union[str, Any] = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ , snake_case_ : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ , snake_case_ : Union[str, Any] = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : Any = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 1_0 )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : str = self._get_tensors(5 )
snake_case_ : List[str] = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case_ : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
with self.assertWarns(lowerCAmelCase__ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
snake_case_ : str = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
| 701
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=4 , _lowercase=3 , _lowercase=6_4 , _lowercase=[3, 4, 6, 5] , _lowercase=[2, 4, 8, 1_6] , _lowercase=7 , _lowercase=3.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : Tuple = depths
snake_case_ : int = len(_lowercase )
snake_case_ : Optional[int] = num_heads
snake_case_ : List[str] = kernel_size
snake_case_ : str = mlp_ratio
snake_case_ : str = qkv_bias
snake_case_ : str = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Tuple = drop_path_rate
snake_case_ : Dict = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Union[str, Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Union[str, Any] = layer_scale_init_value
snake_case_ : Optional[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
| 21
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a_ = get_tests_dir("""fixtures""")
a_ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
a_ = get_tests_dir("""fixtures/dummy-config.json""")
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 0
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase ).to_dict()
config_dict.pop('''feature_extractor_type''' )
__lowerCamelCase = WavaVecaFeatureExtractor(**__UpperCAmelCase )
# save in new folder
model_config.save_pretrained(__UpperCAmelCase )
config.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
# make sure private variable is not incorrectly saved
__lowerCamelCase = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCAmelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCAmelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase , revision='''aaaaaa''' )
def lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowerCamelCase ( self ):
'''simple docstring'''
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__UpperCAmelCase ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCAmelCase ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__UpperCAmelCase )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
def lowerCamelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , __UpperCAmelCase )
AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCamelCase = CustomFeatureExtractor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self ):
'''simple docstring'''
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = True
try:
AutoConfig.register('''custom''' , __UpperCAmelCase )
AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
# If remote code is not set, the default is to use local
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(__UpperCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 175
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """t5"""
lowerCAmelCase__ = ["""past_key_values"""]
lowerCAmelCase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , __UpperCAmelCase=32128 , __UpperCAmelCase=512 , __UpperCAmelCase=64 , __UpperCAmelCase=2048 , __UpperCAmelCase=6 , __UpperCAmelCase=None , __UpperCAmelCase=8 , __UpperCAmelCase=32 , __UpperCAmelCase=128 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=1.0 , __UpperCAmelCase="relu" , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = d_model
__lowerCamelCase = d_kv
__lowerCamelCase = d_ff
__lowerCamelCase = num_layers
__lowerCamelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowerCamelCase = num_heads
__lowerCamelCase = relative_attention_num_buckets
__lowerCamelCase = relative_attention_max_distance
__lowerCamelCase = dropout_rate
__lowerCamelCase = layer_norm_epsilon
__lowerCamelCase = initializer_factor
__lowerCamelCase = feed_forward_proj
__lowerCamelCase = use_cache
__lowerCamelCase = self.feed_forward_proj.split('''-''' )
__lowerCamelCase = act_info[-1]
__lowerCamelCase = act_info[0] == '''gated'''
if len(__UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(__UpperCAmelCase ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__lowerCamelCase = '''gelu_new'''
super().__init__(
pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase , )
class __lowerCAmelCase ( lowerCAmelCase__ ):
@property
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
__lowerCamelCase = '''past_encoder_sequence + sequence'''
__lowerCamelCase = {0: '''batch'''}
__lowerCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
__lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
return common_inputs
@property
def lowerCamelCase ( self ):
'''simple docstring'''
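# 13 is the default ONNX opset used when exporting T5 with this config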
return 13
| 175
| 1
|
from math import sqrt
def A_ ( snake_case : Tuple = 1000000 ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 42
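# Project Euler 86: grow the largest cuboid side c and, whenever the shortest
# surface path sqrt((a + b)**2 + c**2) is an integer, count the distinct
# (a, b) splits of a + b via min(c, (a + b) // 2) - max(1, a + b - c) + 1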
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(UpperCAmelCase__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
| 714
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( snake_case : List[str] ) -> List[str]:
'''simple docstring'''
print('''Loading config file...''' )
def flatten_yaml_as_dict(snake_case : Optional[int] , snake_case : List[Any]="" , snake_case : str="." ):
__UpperCamelCase = []
for k, v in d.items():
__UpperCamelCase = parent_key + sep + k if parent_key else k
if isinstance(snake_case , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case , snake_case , sep=snake_case ).items() )
else:
items.append((new_key, v) )
return dict(snake_case )
__UpperCamelCase = argparse.Namespace()
with open(snake_case , '''r''' ) as yaml_file:
try:
__UpperCamelCase = yaml.load(snake_case , Loader=yaml.FullLoader )
__UpperCamelCase = flatten_yaml_as_dict(snake_case )
for k, v in flat_cfg.items():
setattr(snake_case , snake_case , snake_case )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(snake_case , str(snake_case ) ) )
return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g.: seg_head.aux_head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our MobileViTV2 structure.
    """
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
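# Illustrative invocation (file paths are placeholders, not real files):
#   python convert_mobilevitv2_to_pytorch.py --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-hf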
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 451
| 0
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
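# `_LazyModule` replaces this module in `sys.modules`, so the imports listed in
# `_import_structure` are only resolved when one of those names is accessed.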
| 106
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
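# Illustrative usage via the public API that wraps this reader:
#   from datasets import Dataset
#   ds = Dataset.from_generator(lambda: ({"idx": i} for i in range(3)))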
| 106
| 1
|
"""simple docstring"""
def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
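# Illustrative examples:
#   join("#", ["a", "b", "c"])        -> "a#b#c"
#   join(" ", ["You", "are", "cool"]) -> "You are cool"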
if __name__ == "__main__":
from doctest import testmod
testmod()
| 700
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 505
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('cpu')


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
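# Each tensor above is the expected first five ImageNet logits of the original
# checkpoint; it is compared against hf_logits[0, 0:5] in the conversion check below.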
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/ROUGE_(metric)',
                'https://github.com/google-research/google-research/tree/master/rouge',
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 20
| 1
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding='VALID',
            groups=groups,
            use_bias=False,
            name='convolution',
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name='embedder',
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name='convolution')
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='relu', name='attention.0'),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='sigmoid', name='attention.2'),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
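# The squeeze-and-excitation block above global-average-pools the feature map
# to a 1x1 channel descriptor, squeezes it through a ReLU bottleneck, expands
# it back with a sigmoid gate, and rescales the input channel-wise.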
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.2'),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name='layer.2'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.3'),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(self, config, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name='layers.0'),
            *[layer(config, out_channels, out_channels, name=F'layers.{i+1}') for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name='stages.0',
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=F'stages.{i+1}'))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='embedder')
        self.encoder = TFRegNetEncoder(config, name='encoder')
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name='regnet')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name='regnet')
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name='classifier.1') if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 701
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
    'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
    'uclanlp/visualbert-vqa-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
    ),
    'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
    'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
    'uclanlp/visualbert-vcr-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
    ),
    'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
    'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
    'uclanlp/visualbert-nlvr2-coco-pre': (
        'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 685
| 0
|
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
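# Sanity check: at redshift 0 the density terms sum to (almost exactly) 1,
# since the curvature term absorbs the remainder, so the function returns the
# Hubble constant itself: hubble_parameter(68.3, 1e-4, 0.3, 0.7, 0) is ~68.3.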
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
            hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 494
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
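# `multi_query=True` is what distinguishes GPTBigCode (SantaCoder/StarCoder)
# from vanilla GPT-2: all attention heads share a single key/value head
# (multi-query attention), which shrinks the KV cache during generation.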
| 494
| 1
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
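# Media and binary columns are large per row, so the Parquet row groups are
# capped to keep each group at a manageable byte size; `None` means the
# writer's default batch size will be used instead.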
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, 'wb+') as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop('path_or_buf', None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit='ba',
            disable=not logging.is_progress_bar_enabled(),
            desc='Creating parquet from Arrow format',
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
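# Illustrative usage (a `Dataset` named `ds` is assumed to exist):
#   ParquetDatasetWriter(ds, "data.parquet").write()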
| 241
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.')

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}

    logger.info(f'Loading tokenizer classes: {tokenizer_names}')

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')

        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f'=> File names {file_names}')

            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 241
| 1
|
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _A ( __magic_name__ , unittest.TestCase):
SCREAMING_SNAKE_CASE : Optional[int] = VideoToVideoSDPipeline
SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''}) - {'''image''', '''width''', '''height'''}
SCREAMING_SNAKE_CASE : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''}) - {'''image'''}
SCREAMING_SNAKE_CASE : int = PipelineTesterMixin.required_optional_params - {'''latents'''}
SCREAMING_SNAKE_CASE : List[str] = False
# No `output_type`.
SCREAMING_SNAKE_CASE : Tuple = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
])
def UpperCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
SCREAMING_SNAKE_CASE_ : Dict = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE_ : Dict = CLIPTextModel(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
    def test_text_to_video_default_case( self ):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=5e-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
def UpperCAmelCase ( self ):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class _A ( unittest.TestCase):
    def test_two_step_model( self ):
        """simple docstring"""
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1024, 576) , generator=generator )
        video = video.to('cuda' )
        prompt = 'Spiderman is surfing'
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type='pt' ).frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 511
|
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
lowerCAmelCase : Optional[int] = argparse.ArgumentParser(
description=(
        'Extracts some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCAmelCase : Union[str, Any] = parser.parse_args()
if args.model_type == "roberta":
lowerCAmelCase : Optional[Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase : Dict = 'roberta'
elif args.model_type == "gpt2":
lowerCAmelCase : List[Any] = GPTaLMHeadModel.from_pretrained(args.model_name)
lowerCAmelCase : Union[str, Any] = 'transformer'
lowerCAmelCase : int = model.state_dict()
lowerCAmelCase : Tuple = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowerCAmelCase : Union[str, Any] = state_dict[F'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowerCAmelCase : Any = F'{prefix}.embeddings.{w}.weight'
lowerCAmelCase : Optional[int] = state_dict[param_name]
for w in ["weight", "bias"]:
lowerCAmelCase : Optional[Any] = F'{prefix}.embeddings.LayerNorm.{w}'
lowerCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
lowerCAmelCase : List[Any] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowerCAmelCase : List[str] = state_dict[
F'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
lowerCAmelCase : Union[str, Any] = state_dict[F'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowerCAmelCase : Union[str, Any] = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowerCAmelCase : Union[str, Any] = state_dict[F'{layer}']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase : Union[str, Any] = state_dict[F'lm_head.dense.{w}']
lowerCAmelCase : List[str] = state_dict[F'lm_head.layer_norm.{w}']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowerCAmelCase : str = state_dict[F'{prefix}.ln_f.{w}']
lowerCAmelCase : int = state_dict['lm_head.weight']
print(F'N layers selected for distillation: {std_idx}')
print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
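# Note on the selection above: teacher layers [0, 2, 4, 7, 9, 11] are copied into
# student positions 0..5, so a 12-layer teacher yields a 6-layer student whose
# blocks keep the teacher's weights verbatim; only the layer numbering changes.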
| 511
| 1
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
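    # The rule encoded above, in one place: scale so the shorter side equals
    # size['shortest_edge'] and the longer side keeps the aspect ratio; the
    # size['longest_edge'] cap never triggers here because the tester pins it
    # far above max_resolution (see the comment in __init__).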
@require_torch
@require_vision
class lowerCAmelCase__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_rescale' ) )
        self.assertTrue(hasattr(image_processing , 'rescale_factor' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_pad' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
# prepare image and target
_lowerCamelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_lowerCamelCase : int = json.loads(f.read() )
_lowerCamelCase : str = {'image_id': 39769, 'annotations': target}
# encode them
_lowerCamelCase : str = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
_lowerCamelCase : int = image_processing(images=lowercase , annotations=lowercase , return_tensors='pt' )
# verify pixel values
_lowerCamelCase : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , lowercase )
_lowerCamelCase : List[str] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
_lowerCamelCase : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) )
# verify boxes
_lowerCamelCase : int = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase )
_lowerCamelCase : Tuple = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) )
# verify image_id
_lowerCamelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) )
# verify is_crowd
_lowerCamelCase : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) )
# verify class_labels
_lowerCamelCase : Dict = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) )
# verify orig_size
_lowerCamelCase : Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) )
# verify size
_lowerCamelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) )
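    # The target handed to the processor above follows COCO detection format:
    # {'image_id': int, 'annotations': [{'bbox': [x, y, w, h], 'area': float,
    # 'category_id': int, 'iscrowd': 0 or 1}, ...]}. DETR-style processors return
    # boxes as normalized (cx, cy, w, h), which is why the expected box values
    # all fall inside [0, 1].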
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
# prepare image, target and masks_path
_lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_lowerCamelCase : List[Any] = json.loads(f.read() )
_lowerCamelCase : str = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
_lowerCamelCase : List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowerCamelCase : List[Any] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
_lowerCamelCase : Dict = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors='pt' )
# verify pixel values
_lowerCamelCase : Optional[int] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , lowercase )
_lowerCamelCase : List[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
_lowerCamelCase : Optional[Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) )
# verify boxes
_lowerCamelCase : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase )
_lowerCamelCase : Union[str, Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) )
# verify image_id
_lowerCamelCase : List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) )
# verify is_crowd
_lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) )
# verify class_labels
_lowerCamelCase : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) )
# verify masks
_lowerCamelCase : List[str] = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase )
# verify orig_size
_lowerCamelCase : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) )
# verify size
_lowerCamelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) )
| 492
|
"""simple docstring"""
from pathlib import Path
import fire
def _snake_case ( src_dir , dest_dir , n ):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new_lines = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('w' ).write('\n'.join(new_lines ) )
if __name__ == "__main__":
    fire.Fire(_snake_case)
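# Usage sketch for the helper above (hypothetical paths): running
#   python minify.py data/full data/tiny 100
# keeps the first 100 lines of every file directly under data/full and writes
# the truncated copies to data/tiny, printing each destination path.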
| 492
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class _snake_case ( PretrainedConfig ):
    model_type = "retribert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ) -> Any:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 400
|
'''simple docstring'''
import os
import pytest
from attr import dataclass
_SCREAMING_SNAKE_CASE : Dict = "us-east-1" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions( self ) -> str:
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name( self ) -> str:
        '''simple docstring'''
        return F'{self.framework}-transformers-test'
@property
    def test_path( self ) -> str:
'''simple docstring'''
return F'./tests/sagemaker/scripts/{self.framework}'
@property
    def image_uri( self ) -> str:
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def UpperCamelCase_( request ):
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 400
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
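# Pattern note: at type-checking time the real symbols are imported so IDEs and
# static checkers see them; at runtime the module replaces itself in sys.modules
# with a _LazyModule that resolves names from _import_structure on first
# attribute access, deferring the heavy torch/TF imports until actually needed.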
| 716
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
def rename_keys ( state_dict , encoder_only=False ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head' ):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone' ):
            key = key.replace('backbone' , 'segformer.encoder' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed' ) + len('patch_embed' )]
            key = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(idx)-1}' )
        if "norm" in key:
            key = key.replace('norm' , 'layer_norm' )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
            key = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(idx)-1}' )
        if "layer_norm1" in key:
            key = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            key = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block' ) + len('block' )]
            key = key.replace(F'block{idx}' , F'block.{int(idx)-1}' )
        if "attn.q" in key:
            key = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            key = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            key = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            key = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            key = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            key = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv' , 'linear_fuse' )
            key = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c' ) + len('linear_c' )]
            key = key.replace(F'linear_c{idx}' , F'linear_c.{int(idx)-1}' )
        if key.startswith('head' ):
            key = key.replace('head' , 'classifier' )
        new_state_dict[key] = value
    return new_state_dict
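# Two worked examples of the renaming above (tensors omitted):
#   'backbone.patch_embed1.proj.weight' -> 'segformer.encoder.patch_embeddings.0.proj.weight'
#   'backbone.block1.0.attn.q.weight'   -> 'segformer.encoder.block.0.0.attention.self.query.weight'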
def read_in_k_v ( state_dict , config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
            kv_bias = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[
                config.hidden_sizes[i] :
            ]
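# Shape example for the split above: with config.hidden_sizes[i] == 32 the fused
# kv.weight has shape (64, 32); rows 0..31 become the key projection and rows
# 32..63 the value projection, and kv.bias splits the same way along dim 0.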
def prepare_img ():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_segformer_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path ):
SCREAMING_SNAKE_CASE_ :List[str] = SegformerConfig()
SCREAMING_SNAKE_CASE_ :Any = False
# set attributes based on model_name
SCREAMING_SNAKE_CASE_ :str = 'huggingface/label-files'
if "segformer" in model_name:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
SCREAMING_SNAKE_CASE_ :Optional[Any] = 150
SCREAMING_SNAKE_CASE_ :Tuple = 'ade20k-id2label.json'
SCREAMING_SNAKE_CASE_ :Union[str, Any] = (1, 150, 128, 128)
elif "city" in model_name:
SCREAMING_SNAKE_CASE_ :Optional[int] = 19
SCREAMING_SNAKE_CASE_ :Union[str, Any] = 'cityscapes-id2label.json'
SCREAMING_SNAKE_CASE_ :str = (1, 19, 128, 128)
else:
raise ValueError(F'Model {model_name} not supported' )
elif "mit" in model_name:
SCREAMING_SNAKE_CASE_ :Dict = True
SCREAMING_SNAKE_CASE_ :Tuple = model_name[4:6]
SCREAMING_SNAKE_CASE_ :Union[str, Any] = 1000
SCREAMING_SNAKE_CASE_ :str = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE_ :Optional[Any] = (1, 1000)
else:
raise ValueError(F'Model {model_name} not supported' )
# set config attributes
SCREAMING_SNAKE_CASE_ :List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ :int = idalabel
SCREAMING_SNAKE_CASE_ :Tuple = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
SCREAMING_SNAKE_CASE_ :Union[str, Any] = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE_ :str = 256
elif size == "b2":
SCREAMING_SNAKE_CASE_ :Optional[Any] = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE_ :List[Any] = 768
SCREAMING_SNAKE_CASE_ :Optional[Any] = [3, 4, 6, 3]
elif size == "b3":
SCREAMING_SNAKE_CASE_ :List[str] = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE_ :Optional[Any] = 768
SCREAMING_SNAKE_CASE_ :Any = [3, 4, 18, 3]
elif size == "b4":
SCREAMING_SNAKE_CASE_ :List[Any] = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE_ :Optional[Any] = 768
SCREAMING_SNAKE_CASE_ :Any = [3, 8, 27, 3]
elif size == "b5":
SCREAMING_SNAKE_CASE_ :str = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE_ :Optional[int] = 768
SCREAMING_SNAKE_CASE_ :str = [3, 6, 40, 3]
else:
raise ValueError(F'Size {size} not supported' )
# load image processor (only resize + normalize)
SCREAMING_SNAKE_CASE_ :List[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE , align=SCREAMING_SNAKE_CASE , do_random_crop=SCREAMING_SNAKE_CASE )
# prepare image
SCREAMING_SNAKE_CASE_ :Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_ :List[str] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
if encoder_only:
SCREAMING_SNAKE_CASE_ :Any = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('cpu' ) )
else:
SCREAMING_SNAKE_CASE_ :Dict = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
SCREAMING_SNAKE_CASE_ :List[str] = rename_keys(SCREAMING_SNAKE_CASE , encoder_only=SCREAMING_SNAKE_CASE )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
if encoder_only:
SCREAMING_SNAKE_CASE_ :Any = False
SCREAMING_SNAKE_CASE_ :Union[str, Any] = SegformerForImageClassification(SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ :List[str] = SegformerForSemanticSegmentation(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# forward pass
SCREAMING_SNAKE_CASE_ :List[str] = model(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
SCREAMING_SNAKE_CASE_ :Union[str, Any] = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
SCREAMING_SNAKE_CASE_ :Union[str, Any] = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
SCREAMING_SNAKE_CASE_ :Optional[int] = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
SCREAMING_SNAKE_CASE_ :str = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
SCREAMING_SNAKE_CASE_ :Tuple = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ :str = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
SCREAMING_SNAKE_CASE_ :List[str] = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
SCREAMING_SNAKE_CASE_ :List[Any] = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ :List[Any] = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ :List[Any] = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ :Any = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
SCREAMING_SNAKE_CASE_ :Dict = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 233
| 0
|
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping ( key , file ):
    layer_rename_map = {
"""word_embeddings.weight""": """word_embeddings.weight""",
"""word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
"""word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
"""weight""": """ln_f.weight""",
"""bias""": """ln_f.bias""",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
    layer_number = int(re.match(R""".*layer_(\d*).*""" , file )[1] )
layer_number -= 3
return F'''h.{layer_number}.''' + key
def get_dtype_size ( dtype ):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R"""[^\d](\d+)$""" , str(dtype ) )
    if bit_search is None:
        raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
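# Quick sanity checks for get_dtype_size (illustrative): torch.float32 -> 4,
# torch.float16 -> 2, torch.int8 -> 1, and torch.bool is special-cased to 1/8
# byte since eight booleans pack into one byte in the size accounting.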
def convert_bloom_checkpoint_to_pytorch ( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
# Construct model
if bloom_config_file == "":
lowerCamelCase__ = BloomConfig()
else:
lowerCamelCase__ = BloomConfig.from_json_file(__lowerCAmelCase )
if shard_model:
lowerCamelCase__ = os.listdir(__lowerCAmelCase )
lowerCamelCase__ = sorted(filter(lambda __lowerCAmelCase : s.startswith("""layer""" ) and "model_00" in s , __lowerCAmelCase ) )
lowerCamelCase__ = {"""weight_map""": {}, """metadata""": {}}
lowerCamelCase__ = 0
lowerCamelCase__ = None
lowerCamelCase__ = BloomConfig()
for j, file in enumerate(__lowerCAmelCase ):
print("""Processing file: {}""".format(__lowerCAmelCase ) )
lowerCamelCase__ = None
for i in range(__lowerCAmelCase ):
# load all TP files
lowerCamelCase__ = file.replace("""model_00""" , F'''model_0{i}''' )
lowerCamelCase__ = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
lowerCamelCase__ = list(temp.keys() )
for key in keys:
lowerCamelCase__ = temp.pop(__lowerCAmelCase )
if tensors is None:
lowerCamelCase__ = temp
else:
for key in tensors.keys():
                    if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        tensors[key] += temp[key]
                    else:
                        # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                        cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
                        tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
        # Divide by the number of TP the weights we want to average
        for key in tensors.keys():
            if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                tensors[key] = tensors[key] / pretraining_tp
torch.save(
__lowerCAmelCase , os.path.join(
__lowerCAmelCase , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(__lowerCAmelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCamelCase__ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCamelCase__ = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(__lowerCAmelCase ) ).zfill(5 ) )
lowerCamelCase__ = BloomConfig()
lowerCamelCase__ = pytorch_dump_folder_path + """/""" + CONFIG_NAME
lowerCamelCase__ = total_size
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(__lowerCAmelCase , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + """\n"""
f.write(__lowerCAmelCase )
else:
lowerCamelCase__ = BloomModel(__lowerCAmelCase )
lowerCamelCase__ = os.listdir(__lowerCAmelCase )
lowerCamelCase__ = sorted(filter(lambda __lowerCAmelCase : s.startswith("""layer""" ) and "model_00" in s , __lowerCAmelCase ) )
lowerCamelCase__ = None
for i, file in enumerate(__lowerCAmelCase ):
lowerCamelCase__ = None
for i in range(__lowerCAmelCase ):
# load all TP files
lowerCamelCase__ = file.replace("""model_00""" , F'''model_0{i}''' )
lowerCamelCase__ = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
lowerCamelCase__ = list(temp.keys() )
for key in keys:
lowerCamelCase__ = temp.pop(__lowerCAmelCase )
if tensors is None:
lowerCamelCase__ = temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                    if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        tensors[key] += temp[key]
                    else:
                        # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                        cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
                        tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
        # Divide by the number of TP the weights we want to average
        for key in tensors.keys():
            if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                tensors[key] = tensors[key] / pretraining_tp
lowerCamelCase__ = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
lowerCamelCase__ = set(other_keys.missing_keys )
else:
lowerCamelCase__ = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
lowerCamelCase__ = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowerCamelCase__ = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
lowerCamelCase__ = model.to(config.torch_dtype )
torch.save(model.state_dict() , __lowerCAmelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
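# A minimal sketch of the tensor-parallel merge rule the script applies twice
# above, written as a standalone helper. The helper name is illustrative; the
# rule itself mirrors the script: shards whose key ends with a layer-norm/bias
# suffix are averaged across TP ranks, everything else is concatenated
# (dim 1 for row-parallel weights, dim 0 otherwise).
#
#     import torch
#
#     def merge_tp_shards(shards, key):
#         if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
#             return sum(shards) / len(shards)
#         cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
#         return torch.cat(shards, dim=cat_dim)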
| 50
|
'''simple docstring'''
def A__ ( numa : int , numb : int ):
    """Return True iff the two integers have opposite signs (XOR sets the sign bit)."""
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
| 1
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
A_ : List[str] =logging.get_logger(__name__)
class __a ( PreTrainedModel ):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__( self , config ):
        super().__init__(config )
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config )
        self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )
    @torch.no_grad()
    def forward( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        image_embeds = self.vision_model(clip_input )[0]
        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected ):
            logger.warning(
                'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )
        for idx, nsfw_detected_ in enumerate(nsfw_detected ):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )
        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected ):
            logger.warning(
                'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )
        for idx, watermark_detected_ in enumerate(watermark_detected ):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )
        return images, nsfw_detected, watermark_detected
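# Usage sketch (hypothetical tensors): given CLIP pixel inputs and the decoded
# images, `images, nsfw, wm = checker(clip_input, images)` returns the images
# with any flagged entries zeroed out plus the two per-image boolean lists.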
| 718
|
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class __a ( TestCase ):
    def test_make_duplicate_clusters( self ):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset( self ):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , True )
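# Why the first two rows cluster above: 'a ' * 20 and 'a ' * 30 share nearly all
# of their shingles, so their estimated Jaccard similarity clears the 0.85
# threshold, while 'b ' * 7 shares none of them and stays out of the cluster.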
| 222
| 0
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __UpperCamelCase :
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : str = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : str = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _test_save_load_optional_components(self):
"""simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
"""simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-bert"
SCREAMING_SNAKE_CASE = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
SCREAMING_SNAKE_CASE = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = cached_file(lowerCAmelCase , lowerCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(lowerCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(lowerCAmelCase , lowerCAmelCase ) ) )
with open(os.path.join(lowerCAmelCase , "refs" , "main" ) ) as f:
UpperCAmelCase_ = f.read()
self.assertEqual(lowerCAmelCase , os.path.join(lowerCAmelCase , "snapshots" , lowerCAmelCase , lowerCAmelCase ) )
self.assertTrue(os.path.isfile(lowerCAmelCase ) )
# File is cached at the same place the second time.
UpperCAmelCase_ = cached_file(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
# Using a specific revision to test the full commit hash.
UpperCAmelCase_ = cached_file(lowerCAmelCase , lowerCAmelCase , revision="9b8c223" )
self.assertEqual(lowerCAmelCase , os.path.join(lowerCAmelCase , "snapshots" , lowerCAmelCase , lowerCAmelCase ) )
def A__ ( self ):
with self.assertRaisesRegex(lowerCAmelCase , "is not a valid model identifier" ):
UpperCAmelCase_ = cached_file("tiny-random-bert" , lowerCAmelCase )
with self.assertRaisesRegex(lowerCAmelCase , "is not a valid git identifier" ):
UpperCAmelCase_ = cached_file(lowerCAmelCase , lowerCAmelCase , revision="aaaa" )
with self.assertRaisesRegex(lowerCAmelCase , "does not appear to have a file named" ):
UpperCAmelCase_ = cached_file(lowerCAmelCase , "conf" )
def A__ ( self ):
with self.assertRaisesRegex(lowerCAmelCase , "does not appear to have a file named" ):
UpperCAmelCase_ = cached_file(lowerCAmelCase , "conf" )
with open(os.path.join(lowerCAmelCase , "refs" , "main" ) ) as f:
UpperCAmelCase_ = f.read()
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase , ".no_exist" , lowerCAmelCase , "conf" ) ) )
UpperCAmelCase_ = cached_file(lowerCAmelCase , "conf" , _raise_exceptions_for_missing_entries=lowerCAmelCase )
self.assertIsNone(lowerCAmelCase )
UpperCAmelCase_ = cached_file(lowerCAmelCase , "conf" , local_files_only=lowerCAmelCase , _raise_exceptions_for_missing_entries=lowerCAmelCase )
self.assertIsNone(lowerCAmelCase )
UpperCAmelCase_ = mock.Mock()
UpperCAmelCase_ = 500
UpperCAmelCase_ = {}
UpperCAmelCase_ = HTTPError
UpperCAmelCase_ = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase ) as mock_head:
UpperCAmelCase_ = cached_file(lowerCAmelCase , "conf" , _raise_exceptions_for_connection_errors=lowerCAmelCase )
self.assertIsNone(lowerCAmelCase )
# This check we did call the fake head request
mock_head.assert_called()
def A__ ( self ):
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase ) )
def A__ ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(lowerCAmelCase , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , lowerCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(lowerCAmelCase , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , lowerCAmelCase , revision="ahaha" )
UpperCAmelCase_ = get_file_from_repo("bert-base-cased" , lowerCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
UpperCAmelCase_ = json.loads(open(lowerCAmelCase , "r" ).read() )
self.assertEqual(config["hidden_size"] , 768 )
def A__ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = Path(lowerCAmelCase ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(lowerCAmelCase , "a.txt" ) , str(lowerCAmelCase ) )
self.assertIsNone(get_file_from_repo(lowerCAmelCase , "b.txt" ) )
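# The behaviour pinned down above, as it looks in typical library usage
# (illustrative snippet, not part of the test suite):
#
#   resolved = cached_file(RANDOM_BERT, CONFIG_NAME)  # downloads on the first call
#   config = json.loads(open(resolved).read())        # later calls hit the local cache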
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
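# A minimal sketch of how these helpers are typically driven from an
# accelerate training loop. The attribute path `accelerator.state.fsdp_plugin`
# follows the accelerate API; the checkpoint directory name is illustrative:
#
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt/step_100")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt/step_100")
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt/step_100")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt/step_100")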
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator=None, eta: float = 0.0, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
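# A minimal usage sketch (the checkpoint id is illustrative; any repo that
# packages a VQModel, a UNet2DModel and a DDIMScheduler in this layout works):
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("ldm_sample.png")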
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Build the vector that points from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Cross product of two 3D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """True when every component rounds to zero at the given accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def points_are_collinear_3d(point_a: Point3d, point_b: Point3d, point_c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear when the cross product of AB and AC is the zero vector."""
    ab = create_vector(point_a, point_b)
    ac = create_vector(point_a, point_c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
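# Quick check (illustrative values): three points on the x-axis are
# collinear, while a point lifted off the axis is not.
#   points_are_collinear_3d((0, 0, 0), (1, 0, 0), (2, 0, 0))  # True
#   points_are_collinear_3d((0, 0, 0), (1, 0, 0), (1, 1, 0))  # False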
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1,
) -> complex:
    """Find a root of `function` via the (multiplicity-aware) Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Run simulated annealing from `search_prob` and return the best state found."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
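# A worked example of the acceptance rule used above: a worsening move with
# change = -2 is accepted with probability e**(change / current_temp), i.e.
# about 0.98 at current_temp = 100 but only about 0.14 at current_temp = 1,
# which is why the search settles down as the temperature decays.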
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
__UpperCAmelCase = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
__UpperCAmelCase = {
"""facebook/bart-base""": 1_024,
"""facebook/bart-large""": 1_024,
"""facebook/bart-large-mnli""": 1_024,
"""facebook/bart-large-cnn""": 1_024,
"""facebook/bart-large-xsum""": 1_024,
"""yjernite/bart_eli5""": 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
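    # Token layout produced by the two helpers above (the BART convention):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s> </s> B </s>
    # and `create_token_type_ids_from_sequences` returns all zeros in both cases,
    # since BART does not use token type ids.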
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        '''simple docstring'''
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
@require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        '''simple docstring'''
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
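# Hand-checked examples (assuming binary_and above):
#   binary_and(25, 32)  -> "0b000000"   (11001 & 100000, zero-filled to 6 bits)
#   binary_and(37, 100) -> "0b0100100"  (37 & 100 == 36)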
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
"""simple docstring"""
super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES['spm_file'])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386], )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'], )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE, model_name='facebook/s2t-small-mustc-en-de-st', revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad', )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    """simple docstring"""

    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'

    french_text = "C'est trop cool"
    spanish_text = 'Esto es genial'

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id['it'], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id['de'], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
def solution(n: int = 1000) -> int:
    """
    Return the largest product a * b * c of any Pythagorean triplet
    (a, b, c) with a + b + c == n.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'{solution() = }')
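# For the classic Project Euler #9 instance (n = 1000) the triplet is
# (200, 375, 425): 200**2 + 375**2 == 425**2 and the sum is 1000, so
# solution() returns 200 * 375 * 425 == 31875000.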
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image around its mean pixel value."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
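# Worked example of the rule above: two pixels with values 100 and 200 have
# mean 150, so after thresholding they become 0 and 255 respectively.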
if __name__ == "__main__":
UpperCamelCase__ = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with a truncated Maclaurin series."""
    # Simplify the angle to be between -360 and 360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod()
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight: W = W0 + alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
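# Example invocation (all paths are placeholders):
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged_pipeline \
#       --alpha 0.75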
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    """simple docstring"""

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1/2/3: units in the input, hidden and output BP layers
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            'num_bp1': self.num_bp1,
            'num_bp2': self.num_bp2,
            'num_bp3': self.num_bp3,
            'conv1': self.conv1,
            'step_conv1': self.step_conv1,
            'size_pooling1': self.size_pooling1,
            'rate_weight': self.rate_weight,
            'rate_thre': self.rate_thre,
            'w_conv1': self.w_conv1,
            'wkj': self.wkj,
            'vji': self.vji,
            'thre_conv1': self.thre_conv1,
            'thre_bp2': self.thre_bp2,
            'thre_bp3': self.thre_bp3,
        }
        with open(save_path, 'wb') as f:
            pickle.dump(model_dic, f)

        print(f"""Model saved: {save_path}""")
    @classmethod
    def ReadModel(cls, model_path):
        # read saved model
        with open(model_path, 'rb') as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get('conv1')
        conv_get.append(model_dic.get('step_conv1'))
        size_p1 = model_dic.get('size_pooling1')
        bp1 = model_dic.get('num_bp1')
        bp2 = model_dic.get('num_bp2')
        bp3 = model_dic.get('num_bp3')
        r_w = model_dic.get('rate_weight')
        r_t = model_dic.get('rate_thre')
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get('w_conv1')
        conv_ins.wkj = model_dic.get('wkj')
        conv_ins.vji = model_dic.get('vji')
        conv_ins.thre_conv1 = model_dic.get('thre_conv1')
        conv_ins.thre_bp2 = model_dic.get('thre_bp2')
        conv_ins.thre_bp3 = model_dic.get('thre_bp3')
        return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
# convolution process
UpperCamelCase_ = convs[0]
UpperCamelCase_ = convs[1]
UpperCamelCase_ = np.shape(_UpperCAmelCase )[0]
# get the data slice of original image data, data_focus
UpperCamelCase_ = []
for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
UpperCamelCase_ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_UpperCAmelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
UpperCamelCase_ = []
UpperCamelCase_ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_UpperCAmelCase ):
UpperCamelCase_ = []
for i_focus in range(len(_UpperCAmelCase ) ):
UpperCamelCase_ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_UpperCAmelCase ) )
UpperCamelCase_ = np.asmatrix(_UpperCAmelCase ).reshape(
_UpperCAmelCase , _UpperCAmelCase )
data_featuremap.append(_UpperCAmelCase )
        # expand the data slices to one dimension
UpperCamelCase_ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) )
UpperCamelCase_ = np.asarray(_UpperCAmelCase )
return focus_list, data_featuremap
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ) -> Tuple:
# pooling process
UpperCamelCase_ = len(featuremaps[0] )
UpperCamelCase_ = int(size_map / size_pooling )
UpperCamelCase_ = []
for i_map in range(len(_UpperCAmelCase ) ):
UpperCamelCase_ = featuremaps[i_map]
UpperCamelCase_ = []
for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase_ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_UpperCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_UpperCAmelCase ) )
UpperCamelCase_ = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase )
featuremap_pooled.append(_UpperCAmelCase )
return featuremap_pooled
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Union[str, Any]:
        # expand three-dimensional data into a one-dimensional list
UpperCamelCase_ = []
for i in range(len(_UpperCAmelCase ) ):
UpperCamelCase_ = np.shape(data[i] )
UpperCamelCase_ = data[i].reshape(1 , shapes[0] * shapes[1] )
UpperCamelCase_ = data_listed.getA().tolist()[0]
data_expanded.extend(_UpperCAmelCase )
UpperCamelCase_ = np.asarray(_UpperCAmelCase )
return data_expanded
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
        # expand a matrix into a one-dimensional list
UpperCamelCase_ = np.asarray(_UpperCAmelCase )
UpperCamelCase_ = np.shape(_UpperCAmelCase )
UpperCamelCase_ = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = []
UpperCamelCase_ = 0
for i_map in range(_UpperCAmelCase ):
UpperCamelCase_ = np.ones((size_map, size_map) )
for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase_ = pd_pool[
i_pool
]
UpperCamelCase_ = i_pool + 1
UpperCamelCase_ = np.multiply(
_UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_UpperCAmelCase )
return pd_all
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ) -> Any:
        # model training
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(_UpperCAmelCase )) )
print((' - - Shape: Teach_Data ', np.shape(_UpperCAmelCase )) )
UpperCamelCase_ = 0
UpperCamelCase_ = []
UpperCamelCase_ = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCamelCase_ = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_UpperCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCamelCase_ = np.asmatrix(datas_train[p] )
UpperCamelCase_ = np.asarray(datas_teach[p] )
UpperCamelCase_ , UpperCamelCase_ = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase_ = self.pooling(_UpperCAmelCase , self.size_poolinga )
UpperCamelCase_ = np.shape(_UpperCAmelCase )
UpperCamelCase_ = self._expand(_UpperCAmelCase )
UpperCamelCase_ = data_bp_input
UpperCamelCase_ = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa
UpperCamelCase_ = self.sig(_UpperCAmelCase )
UpperCamelCase_ = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa
UpperCamelCase_ = self.sig(_UpperCAmelCase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
UpperCamelCase_ = np.multiply(
(data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
UpperCamelCase_ = np.multiply(
np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
UpperCamelCase_ = np.dot(_UpperCAmelCase , self.vji )
UpperCamelCase_ = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCamelCase_ = pd_conva_pooled.T.getA().tolist()
UpperCamelCase_ = self._calculate_gradient_from_pool(
_UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCamelCase_ = self._expand_mat(pd_conva_all[k_conv] )
UpperCamelCase_ = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCamelCase_ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCamelCase_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCamelCase_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCamelCase_ = self.thre_bpa - pd_k_all * self.rate_thre
UpperCamelCase_ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error over all images
UpperCamelCase_ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCamelCase_ = rp + 1
UpperCamelCase_ = error_count / patterns
all_mse.append(_UpperCAmelCase )
def draw_error():
UpperCamelCase_ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_UpperCAmelCase , '+-' )
plt.plot(_UpperCAmelCase , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(_UpperCAmelCase , alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Optional[Any]:
# model predict
UpperCamelCase_ = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(_UpperCAmelCase )) )
for p in range(len(_UpperCAmelCase ) ):
UpperCamelCase_ = np.asmatrix(datas_test[p] )
UpperCamelCase_ , UpperCamelCase_ = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase_ = self.pooling(_UpperCAmelCase , self.size_poolinga )
UpperCamelCase_ = self._expand(_UpperCAmelCase )
UpperCamelCase_ = data_bp_input
UpperCamelCase_ = bp_outa * self.vji.T - self.thre_bpa
UpperCamelCase_ = self.sig(_UpperCAmelCase )
UpperCamelCase_ = bp_outa * self.wkj.T - self.thre_bpa
UpperCamelCase_ = self.sig(_UpperCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
UpperCamelCase_ = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out]
return np.asarray(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Union[str, Any]:
        # return the image data after the convolution process so it can be inspected
UpperCamelCase_ = np.asmatrix(_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase_ = self.pooling(_UpperCAmelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
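    # A minimal usage sketch (hypothetical data and de-obfuscated names; in this
    # dump the class is `_a` and its train/predict methods both appear as
    # `_UpperCAmelCase`). Assuming the original signature
    # CNN(conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w, rate_t):
    #
    #   cnn = _a([3, 3, 1], 2, 48, 9, 3)   # 3x3 kernels, 3 maps, stride 1, 2x2 pooling
    #   mse = cnn.train(train_images, train_labels, n_repeat=100,
    #                   error_accuracy=0.1, draw_e=True)
    #   predictions = cnn.predict(test_images)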
| 23
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
lowercase = """naver-clova-ix/donut-base-finetuned-docvqa"""
lowercase = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
lowercase = """document_qa"""
lowercase = AutoProcessor
lowercase = VisionEncoderDecoderModel
lowercase = ["""image""", """text"""]
lowercase = ["""text"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
UpperCamelCase = task_prompt.replace("{user_input}" , SCREAMING_SNAKE_CASE )
UpperCamelCase = self.pre_processor.tokenizer(
SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_tensors="pt" ).input_ids
UpperCamelCase = self.pre_processor(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=SCREAMING_SNAKE_CASE , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=SCREAMING_SNAKE_CASE , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=SCREAMING_SNAKE_CASE , ).sequences
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase = self.pre_processor.batch_decode(SCREAMING_SNAKE_CASE )[0]
UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
UpperCamelCase = re.sub(R"<.*?>" , "" , SCREAMING_SNAKE_CASE , count=1 ).strip() # remove first task start token
 UpperCamelCase = self.pre_processor.token2json(SCREAMING_SNAKE_CASE )
return sequence["answer"]
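# A minimal usage sketch (hypothetical; the tool class name is obfuscated above,
# and `image` would be a PIL.Image of a document page):
#
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=image, question="What is the invoice total?")
#
# Internally, the question is wrapped in the Donut DocVQA prompt
# "<s_docvqa><s_question>{user_input}</s_question><s_answer>" before generation.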
| 606
| 0
|
'''simple docstring'''
import math
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> bool:
 UpperCAmelCase_ : Optional[int] = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
 return exponent == int(exponent )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : float = 1 / 12345 ) -> int:
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : int = 3
while True:
UpperCAmelCase_ : str = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Tuple = int(SCREAMING_SNAKE_CASE__ )
total_partitions += 1
if check_partition_perfect(SCREAMING_SNAKE_CASE__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(SCREAMING_SNAKE_CASE__ )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
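# Worked example of the checks above: for integer = 5, partition_candidate is
# (5**2 - 1) / 4 = 6.0, an integer, so 6 is counted as a partition;
# check_partition_perfect(6) then computes log2(sqrt(4 * 6 + 1) / 2 + 1 / 2)
# = log2(3), which is not an integer, so 6 is not counted as perfect.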
| 644
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Tuple = scope
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : str = None
if self.use_token_type_ids:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# create attention mask
UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
UpperCAmelCase_ : Any = self.seq_length // 2
UpperCAmelCase_ : Tuple = 0
# first forward pass
UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple()
 # create a hypothetical next token and extend next_input_ids
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCAmelCase_ : str = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , )
# get two different outputs
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
# select random slice
UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval()
UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
# first forward pass
UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple()
 # create hypothetical next tokens and extend next_input_ids
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 )
 # append to next input_ids and attention mask
UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[
'''last_hidden_state'''
]
# select random slice
UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ )
model.to(__magic_name__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = BioGptModel(__magic_name__ )
UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
 (UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_) = config_and_inputs
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__a : Union[str, Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : List[str] = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = BioGptModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : str = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : Tuple = '''left'''
# Define PAD Token = EOS Token = 50256
UpperCAmelCase_ : List[Any] = tokenizer.eos_token
UpperCAmelCase_ : List[Any] = model.config.eos_token_id
# use different length sentences to test batching
UpperCAmelCase_ : Tuple = [
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ )
UpperCAmelCase_ : Any = model.generate(
input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , )
UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ )
UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings )
UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : Tuple = input_dict['''input_ids''']
UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = 3
UpperCAmelCase_ : Optional[int] = '''multi_label_classification'''
UpperCAmelCase_ : int = input_dict['''input_ids''']
UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
UpperCAmelCase_ : str = model(__magic_name__ )[0]
UpperCAmelCase_ : Optional[int] = 4_23_84
UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __magic_name__ )
UpperCAmelCase_ : List[Any] = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = model.generate(
**__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , )
UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__magic_name__ , __magic_name__ )
| 644
| 1
|
def lowerCAmelCase__(__snake_case ,__snake_case ) -> str:
'''simple docstring'''
lowerCamelCase__ = len(__snake_case )
lowerCamelCase__ = len(__snake_case )
lowerCamelCase__ = (
first_str_length if first_str_length > second_str_length else second_str_length
)
lowerCamelCase__ = []
for char_count in range(__snake_case ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(__snake_case )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
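# With the inputs above, characters are taken alternately from "AB" and "XYZ"
# until both are exhausted, so the program prints "AXBYZ".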
| 481
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' ,[
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(__snake_case ,i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 ,4 ), range(4 ,7 ), range(7 ,10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 ,1 ), range(1 ,2 ), range(2 ,3 )]),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = _distribute_shards(**__snake_case )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' ,[
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = _split_gen_kwargs(__snake_case ,__snake_case )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' ,[
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Union[str, Any]:
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(__snake_case ):
_number_of_shards_in_gen_kwargs(__snake_case )
else:
lowerCamelCase__ = _number_of_shards_in_gen_kwargs(__snake_case )
assert out == expected
| 481
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase_ : Any = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class __lowerCAmelCase ( snake_case__ ):
"""simple docstring"""
snake_case = """openai-gpt"""
snake_case = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] , _snake_case : List[str]=40_478 , _snake_case : Dict=512 , _snake_case : Optional[int]=768 , _snake_case : Optional[Any]=12 , _snake_case : Optional[Any]=12 , _snake_case : Tuple="gelu" , _snake_case : List[str]=0.1 , _snake_case : int=0.1 , _snake_case : Dict=0.1 , _snake_case : List[Any]=1e-5 , _snake_case : List[str]=0.0_2 , _snake_case : Dict="cls_index" , _snake_case : Optional[Any]=True , _snake_case : Optional[int]=None , _snake_case : Any=True , _snake_case : Dict=0.1 , **_snake_case : Union[str, Any] , ) -> Dict:
"""simple docstring"""
A_ = vocab_size
A_ = n_positions
A_ = n_embd
A_ = n_layer
A_ = n_head
A_ = afn
A_ = resid_pdrop
A_ = embd_pdrop
A_ = attn_pdrop
A_ = layer_norm_epsilon
A_ = initializer_range
A_ = summary_type
A_ = summary_use_proj
A_ = summary_activation
A_ = summary_first_dropout
A_ = summary_proj_to_labels
super().__init__(**UpperCAmelCase_ )
| 702
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( *_snake_case : int , **_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
pass
def A_ (__a ):
'''simple docstring'''
    A_ = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def A_ (__a ):
'''simple docstring'''
A_ = np.array(__a )
A_ = npimg.shape
return {"hash": hashimage(__a ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCamelCase__ ( self : List[Any] , _snake_case : List[str] , _snake_case : Dict , _snake_case : int ) -> str:
"""simple docstring"""
A_ = MaskGenerationPipeline(model=_snake_case , image_processor=_snake_case )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__ ( self : int , _snake_case : int , _snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
@slow
@require_torch
def lowerCamelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
A_ = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
A_ = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
# Shortening by hashing
A_ = []
for i, o in enumerate(outputs["masks"] ):
 new_output += [{"mask": mask_to_test_readable(_snake_case ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_2_1},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_0_5_3},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_9_6_7},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_9_3},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_9_0_9},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_8_7_9},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_8_3_4},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_7_1_6},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_6_1_2},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_5_9_9},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_5_5_2},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_5_3_2},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_5_1_6},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_4_9_9},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_4_8_3},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_4_6_4},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_4_3},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_4_3},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_4_0_8},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_3_3_5},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_3_2_6},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_2_6_2},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_9_9_9},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_9_8_6},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_9_8_4},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_8_7_3},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
A_ = "facebook/sam-vit-huge"
A_ = pipeline("mask-generation" , model=_snake_case )
A_ = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
A_ = []
for i, o in enumerate(outputs["masks"] ):
 new_output += [{"mask": mask_to_test_readable(_snake_case ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_2_1_0},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_0_5_3},
] , )
| 482
| 0
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict , _snake_case :List[Any] , _snake_case :str=8 ) -> str:
_A = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_A = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
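# Worked example (scale_factor=8, the default above): height = 768 gives
# 768 // 8**2 = 12, returned as 12 * 8 = 96; a non-multiple such as 770 rounds
# up to (12 + 1) * 8 = 104, so the result stays divisible by the scale factor.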
def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict , _snake_case :Tuple=512 , _snake_case :Any=512 ) -> Optional[Any]:
_A = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
_A = np.array(pil_image.convert('''RGB''' ) )
    _A = arr.astype(np.float32 ) / 127.5 - 1
_A = np.transpose(_snake_case , [2, 0, 1] )
_A = torch.from_numpy(_snake_case ).unsqueeze(0 )
return image
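# Sketch of the expected output: for a 512x512 RGB input (the defaults above),
# prepare_image returns a float tensor of shape (1, 3, 512, 512) with pixel
# values rescaled from [0, 255] to [-1, 1] via arr / 127.5 - 1.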
class lowerCamelCase__ ( _A):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDConditionModel , __lowerCAmelCase : DDPMScheduler , __lowerCAmelCase : VQModel , ) -> Tuple:
super().__init__()
self.register_modules(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , movq=__lowerCAmelCase , )
_A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case_ ( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> Dict:
# get the original timestep using init_timestep
_A = min(int(num_inference_steps * strength ) , __lowerCAmelCase )
_A = max(num_inference_steps - init_timestep , 0 )
_A = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def snake_case_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=None ) -> List[Any]:
if not isinstance(__lowerCAmelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}''' )
_A = image.to(device=__lowerCAmelCase , dtype=__lowerCAmelCase )
_A = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_A = image
else:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_A = torch.cat(__lowerCAmelCase , dim=0 )
else:
_A = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_A = self.movq.config.scaling_factor * init_latents
_A = torch.cat([init_latents] , dim=0 )
_A = init_latents.shape
_A = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase )
# get latents
_A = self.scheduler.add_noise(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_A = init_latents
return latents
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : str=0 ) -> Tuple:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_A = torch.device(f'''cuda:{gpu_id}''' )
_A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : int , __lowerCAmelCase : str=0 ) -> List[str]:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_A = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A , _A = cpu_offload_with_hook(__lowerCAmelCase , __lowerCAmelCase , prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case_ ( self : Union[str, Any] ) -> Any:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self : Any , __lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , __lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCAmelCase : int = 5_12 , __lowerCAmelCase : int = 5_12 , __lowerCAmelCase : int = 1_00 , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : float = 0.3 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ) -> List[Any]:
_A = self._execution_device
_A = guidance_scale > 1.0
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = torch.cat(__lowerCAmelCase , dim=0 )
_A = image_embeds.shape[0]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = torch.cat(__lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
_A = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
_A = negative_image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
_A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = [image]
if not all(isinstance(__lowerCAmelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f'''Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
_A = torch.cat([prepare_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for i in image] , dim=0 )
_A = image.to(dtype=image_embeds.dtype , device=__lowerCAmelCase )
_A = self.movq.encode(__lowerCAmelCase )['''latents''']
_A = latents.repeat_interleave(__lowerCAmelCase , dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase )
_A , _A = self.get_timesteps(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_A = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_A , _A = downscale_height_and_width(__lowerCAmelCase , __lowerCAmelCase , self.movq_scale_factor )
_A = self.prepare_latents(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = {'''image_embeds''': image_embeds}
_A = self.unet(
sample=__lowerCAmelCase , timestep=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , added_cond_kwargs=__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0]
if do_classifier_free_guidance:
_A , _A = noise_pred.split(latents.shape[1] , dim=1 )
_A , _A = noise_pred.chunk(2 )
_A , _A = variance_pred.chunk(2 )
_A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A , _A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase , )[0]
# post-processing
_A = self.movq.decode(__lowerCAmelCase , force_not_quantize=__lowerCAmelCase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_A = image * 0.5 + 0.5
_A = image.clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 2
|
from torch import nn
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , _snake_case : List[Any] , _snake_case : Tuple ):
super().__init__()
__lowercase : Any = class_size
__lowercase : List[Any] = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__lowercase : Dict = nn.Linear(_snake_case , _snake_case )
def snake_case_ ( self : Any , _snake_case : str ):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
__lowercase : Any = self.mlp(_snake_case )
return logits
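# A minimal usage sketch (hypothetical sizes; this is a PPLM-style
# classification head mapping an embedding to class logits):
#
#   head = __lowerCAmelCase(5, 768)        # class_size=5, embed_size=768
#   logits = head(torch.randn(1, 768))     # expected shape: (1, 5)
#
# Caveat: with the obfuscated argument order above, nn.Linear may be built as
# (class_size, embed_size) rather than the original (embed_size, class_size).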
| 509
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    r"""Constructs a CLIP image processor: optional shortest-edge resize, center crop,
    rescaling to [0, 1], channel-wise normalization, and RGB conversion."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`, preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale`, e.g. 1/255 to map [0, 255] -> [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image as (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
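# --- Added usage sketch (not part of the original module) ---------------------
# A minimal example of the processor above on a dummy PIL image; it assumes
# Pillow and numpy are installed. `__call__` routes to `preprocess` via the
# base class.
#
#   from PIL import Image
#   import numpy as np
#
#   processor = CLIPImageProcessor()
#   image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#   batch = processor(images=image, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)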
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ):
A = len(snake_case__ )
A = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
A = y_points[i]
for i in range(2 , snake_case__ ):
for j in range(snake_case__ , snake_case__ ):
A = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowerCAmelCase : List[Any] ={"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
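# --- Added note (not part of the original module) -----------------------------
# With the `_LazyModule` registration above, importing `Wav2Vec2ProcessorWithLM`
# from this package only pays the import cost of its heavy dependencies
# (e.g. pyctcdecode) on first attribute access, not at `import transformers`.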
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__lowerCAmelCase : int =pd.read_csv("""sample_data.csv""", header=None)
__lowerCAmelCase : Optional[Any] =df.shape[:1][0]
# If you're using some other dataset input the target column
__lowerCAmelCase : Optional[int] =df.iloc[:, 1:2]
__lowerCAmelCase : List[str] =actual_data.values.reshape(len_data, 1)
__lowerCAmelCase : int =MinMaxScaler().fit_transform(actual_data)
__lowerCAmelCase : List[Any] =1_0
__lowerCAmelCase : int =5
__lowerCAmelCase : str =2_0
__lowerCAmelCase : Union[str, Any] =len_data - periods * look_back
__lowerCAmelCase : Dict =actual_data[:division]
__lowerCAmelCase : List[str] =actual_data[division - look_back :]
__lowerCAmelCase , __lowerCAmelCase : List[str] =[], []
__lowerCAmelCase , __lowerCAmelCase : Optional[int] =[], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__lowerCAmelCase : int =np.array(train_x)
__lowerCAmelCase : List[str] =np.array(test_x)
__lowerCAmelCase : Dict =np.array([list(i.ravel()) for i in train_y])
__lowerCAmelCase : str =np.array([list(i.ravel()) for i in test_y])
__lowerCAmelCase : Optional[Any] =Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
__lowerCAmelCase : Optional[int] =model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
__lowerCAmelCase : Dict =model.predict(x_test)
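    # --- Added note (not part of the original script): `pred` is still in the
    # scaled [0, 1] range. To compare against raw prices, the fitted scaler
    # would have to be kept and inverted, e.g. (illustrative only):
    #
    #   scaler = MinMaxScaler().fit(df.iloc[:, 1:2].values.reshape(len_data, 1))
    #   pred_prices = scaler.inverse_transform(pred.reshape(-1, 1))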
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for RemBERT."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
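# --- Added usage sketch (not part of the original module) ---------------------
# Building model inputs with special tokens; the vocab path below is
# illustrative only and must point at a real SentencePiece model file.
#
#   tok = RemBertTokenizer(vocab_file="sentencepiece.model")
#   tok.build_inputs_with_special_tokens([5, 6, 7])
#   # -> [cls_id, 5, 6, 7, sep_id]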
from .imports import is_rich_available

if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class lowerCamelCase__ ( __lowerCamelCase ):
'''simple docstring'''
_lowerCamelCase = "rwkv"
_lowerCamelCase = {"max_position_embeddings": "context_length"}
def __init__( self ,lowerCamelCase_=5_0_2_7_7 ,lowerCamelCase_=1_0_2_4 ,lowerCamelCase_=4_0_9_6 ,lowerCamelCase_=3_2 ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_=1E-5 ,lowerCamelCase_=0 ,lowerCamelCase_=0 ,lowerCamelCase_=6 ,lowerCamelCase_=False ,lowerCamelCase_=True ,**lowerCamelCase_ ,) -> Union[str, Any]:
A = vocab_size
A = context_length
A = hidden_size
A = num_hidden_layers
A = attention_hidden_size if attention_hidden_size is not None else hidden_size
A = intermediate_size if intermediate_size is not None else 4 * hidden_size
A = layer_norm_epsilon
A = rescale_every
A = use_cache
A = bos_token_id
A = eos_token_id
super().__init__(
tie_word_embeddings=__A ,bos_token_id=__A ,eos_token_id=__A ,**__A )
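# --- Added usage sketch (not part of the original module) ---------------------
# Instantiating the config with its defaults; `max_position_embeddings`
# resolves to `context_length` through `attribute_map`.
#
#   config = RwkvConfig()
#   config.hidden_size               # 4096
#   config.max_position_embeddings   # 1024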
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_pyanvml_available():
    import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
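# --- Added usage sketch (not part of the original script) ---------------------
# Typical invocation; the script name and paths are illustrative only:
#
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path /path/to/s2t_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted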
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
"""simple docstring"""
def A_ ( __lowercase ):
if num <= 0:
raise ValueError('Input must be a positive integer' )
UpperCamelCase_ : List[str] =[True] * (num + 1)
UpperCamelCase_ : Dict =2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , lowerCAmelCase_ ):
UpperCamelCase_ : int =False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
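# --- Added usage sketch (not part of the original module) ---------------------
# Composing the full config from tiny sub-configs:
#
#   text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=2)
#   vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2)
#   cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   cfg.to_dict()["text_config"]["num_layers"]  # 2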
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
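# --- Added usage note (not part of the original script) ------------------------
# Meant to run on a schedule in CI; it only needs a token with repo scope:
#
#   GITHUB_TOKEN=<token> python stale.py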
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[Any] = "cpu"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_unclip_image_variation_input_image(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        expected_slice = np.array(
            [0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
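    # Fixing the decoder and super-res latents up front lets the next test check that passing an
    # image and passing its pre-computed CLIP image embedding produce identical outputs.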
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1
        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())
        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents).images
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds
        img_out_2 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents, image_embeddings=image_embeddings, ).images
        # make sure passing the image embeddings manually gives an identical result
        assert np.abs(img_out_1 - img_out_2).max() < 1E-4
@skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1E-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff)
@skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )
    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs)
@skip_mps
    def test_dict_tuple_outputs_equivalent(self):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
    def test_save_load_local(self):
return super().test_save_load_local()
@skip_mps
    def test_save_load_optional_components(self):
return super().test_save_load_optional_components()
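# The integration test below downloads the real karlo-v1-alpha checkpoint, so it is gated behind
# @slow and @require_torch_gpu.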
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy")
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
| 62
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
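# Note: with this lazy structure, importing the package stays cheap; the torch- and vision-backed
# submodules are only imported when one of the names registered in `_import_structure` is accessed.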
| 113
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"

def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config

def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser

def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")

def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)

if __name__ == "__main__":
    main()
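# Example invocation (illustrative; the config path is arbitrary):
#   accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml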
| 673
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
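# Fast tests: every sub-model below is a tiny randomly-initialised stand-in, so the whole
# text-to-audio pipeline runs on CPU in seconds.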
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=(32, 64), class_embed_type='simple_projection', projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta', model_max_length=77)
        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechTaHifiGan(vocoder_config)
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'vocoder': vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
        }
        return inputs
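    # Two inference steps are enough for a numerical smoke test; the asserts compare a 10-sample
    # slice of the generated waveform against pre-recorded reference values.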
    def test_audioldm_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
        assert np.abs(audio_slice - expected_slice).max() < 1E-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]
        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding='max_length', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        text_inputs = text_inputs['input_ids'].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs['prompt_embeds'] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1E-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding='max_length', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
            text_inputs = text_inputs['input_ids'].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1E-2
    def test_audioldm_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'egg cracking'
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
        assert np.abs(audio_slice - expected_slice).max() < 1E-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = 'A hammer hitting a wooden surface'
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
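    # `audio_length_in_s` is converted internally to a latent length, so the returned waveform
    # duration (samples / vocoder sampling rate) should match the requested value exactly.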
    def test_audioldm_audio_length_in_s(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ['hey']
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechTaHifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
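# The tests below load the full cvssp/audioldm checkpoint and are therefore gated behind @slow.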
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs['num_inference_steps'] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1E-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3E-2
| 673
| 1
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
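# Both converters below rely on the source and target state dicts enumerating their parameters in
# the same order, so zipping the two key lists yields the rename mapping directly.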
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)

def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 162
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
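# MusicGen stores the q/k/v projections as a single fused `in_proj_weight`; the HF decoder expects
# separate q/k/v matrices, so the fused weight is split into three `hidden_size`-row blocks below.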
def rename_state_dict(state_dict, hidden_size) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
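# Conversion entry point: load the fairseq checkpoint, remap the decoder weights, attach the T5
# text encoder and EnCodec audio codec, then sanity-check a single forward pass.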
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
_a : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
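# Example invocation (illustrative; the script filename and output path are arbitrary):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small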
| 689
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates and return the measurement counts."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
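# With both qubits flipped by X gates, an ideal simulator puts all 1_000 shots on the '11' outcome.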
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 639
| 1
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression = None  # compression type in fsspec. ex: "gzip"
    extension = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo, mode="rb", protocol=target_protocol, compression=self.compression, client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            }, **(target_options or {}), )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
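# Each subclass below binds one compression protocol; fsspec selects the class from the URL
# prefix, e.g. "gzip://file.txt::http://host/file.txt.gz".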
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"

class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"

class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"

class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"

class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 429
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 429
| 1
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    validation_split_percentage: Optional[int] = field(
        default=5, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
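# `chinese_ref` marks whole-word boundaries for Chinese text; DataCollatorForWholeWordMask uses it
# to mask complete words rather than individual sub-tokens.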
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"""train[:{data_args.validation_split_percentage}%]""", )
            datasets["train"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"""train[{data_args.validation_split_percentage}%:]""", )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"""Overriding config: {model_args.config_overrides}""")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"""New config: {config}""")
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name.")
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines so that blank rows are not fed to the tokenizer
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
    # Add the Chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file)
    # If we have ref files, we need to keep the Trainer from dropping those columns
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
return results
def _mp_fn(index):
    # Entry point for xla_spawn (TPUs); the index argument is required by the spawner
    main()
if __name__ == "__main__":
main()
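# For reference, a hedged sketch of how a script with this layout (it matches the
# transformers `run_mlm_wwm.py` example) is typically launched; the checkpoint,
# dataset, and output path below are illustrative placeholders, not values taken
# from this file:
#
#     python run_mlm_wwm.py \
#         --model_name_or_path bert-base-chinese \
#         --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \
#         --do_train --do_eval \
#         --output_dir /tmp/mlm-wwm-output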
| 73
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be None.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
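# A minimal usage sketch (hedged: assumes the restored names above and a public
# checkpoint such as "laion/clap-htsat-unfused"; the random waveform stands in for
# real audio):
#
#     import numpy as np
#     from transformers import ClapProcessor
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     audio = np.random.randn(48_000)  # one second of fake audio at 48 kHz
#     inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
#     print(inputs.keys())  # input_ids, attention_mask, input_features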
| 73
| 1
|
from copy import deepcopy
class FenwickTree:
    """A Fenwick tree (binary indexed tree) over an integer array."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree from an array in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the original array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add value to arr[index] in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set arr[index] to value in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Return the sum of arr[0..right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Return the sum of arr[left..right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index i such that the prefix sum up to i is <= value, or -1."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
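# A short usage sketch of the Fenwick tree above (uses the method names restored
# there — add/get/prefix/query; ranges are half-open [left, right)):
#
#     f = FenwickTree(arr=[1, 2, 3, 4, 5])
#     print(f.prefix(3))    # 1 + 2 + 3 = 6
#     print(f.query(1, 4))  # 2 + 3 + 4 = 9
#     f.add(2, 10)          # underlying array becomes [1, 2, 13, 4, 5]
#     print(f.get(2))       # 13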
| 117
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = ['image_processor', 'tokenizer']
__lowercase : str = 'AutoImageProcessor'
__lowercase : Dict = 'AutoTokenizer'
def __init__( self:int , _a:List[str]=None , _a:Optional[Any]=None , **_a:List[str] ):
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
snake_case__ = kwargs.pop('''feature_extractor''' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
snake_case__ = self.image_processor
snake_case__ = False
def __call__( self:Optional[int] , *_a:str , **_a:int ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
snake_case__ = kwargs.pop('''images''' , _a )
snake_case__ = kwargs.pop('''text''' , _a )
if len(_a ) > 0:
snake_case__ = args[0]
snake_case__ = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case__ = self.image_processor(_a , *_a , **_a )
if text is not None:
snake_case__ = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
snake_case__ = True
snake_case__ = self.tokenizer
yield
snake_case__ = self.image_processor
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
if added_vocab is None:
snake_case__ = self.tokenizer.get_added_vocab()
snake_case__ = {}
while tokens:
snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
if start_token is None:
break
snake_case__ = start_token.group(1 )
snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
snake_case__ = start_token.group()
if end_token is None:
snake_case__ = tokens.replace(_a , '''''' )
else:
snake_case__ = end_token.group()
snake_case__ = re.escape(_a )
snake_case__ = re.escape(_a )
snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
if content is not None:
snake_case__ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
if value:
if len(_a ) == 1:
snake_case__ = value[0]
snake_case__ = value
else: # leaf nodes
snake_case__ = []
for leaf in content.split(r'''<sep/>''' ):
snake_case__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case__ = leaf[1:-2] # for categorical special tokens
output[key].append(_a )
if len(output[key] ) == 1:
snake_case__ = output[key][0]
snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
if len(_a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
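# A hedged illustration of what the `tokenajson` method above (token2json in the
# original Donut processor) produces: it turns Donut-style tag sequences into nested
# dicts. The tag names below are invented for illustration. A decoded string like
#
#     "<s_menu><s_nm>Latte</s_nm><s_price>4.50</s_price></s_menu>"
#
# would be parsed into roughly:
#
#     {"menu": {"nm": "Latte", "price": "4.50"}}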
| 33
| 0
|
'''simple docstring'''
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
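# Worked example for the function above: at a 5% discount rate, an outlay of -100
# followed by two inflows of 60 has NPV -100 + 60/1.05 + 60/1.05**2 ≈ 11.56:
#
#     >>> net_present_value(0.05, [-100, 60, 60])
#     11.56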
| 719
|
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 40
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
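# A short usage sketch of the config above (the class name and defaults mirror
# transformers' MarkupLMConfig):
#
#     config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#     print(config.model_type)  # "markuplm"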
| 569
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_lowerCAmelCase = """true"""
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=8_2 ,_lowerCAmelCase=1_6 ):
'''simple docstring'''
set_seed(4_2 )
A_ : List[Any] = RegressionModel()
A_ : int = deepcopy(_lowerCAmelCase )
A_ : Union[str, Any] = RegressionDataset(length=_lowerCAmelCase )
A_ : Union[str, Any] = DataLoader(_lowerCAmelCase ,batch_size=_lowerCAmelCase )
model.to(accelerator.device )
A_ , A_ : int = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase )
return model, ddp_model, dataloader
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=False ):
'''simple docstring'''
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
A_ : Tuple = load_dataset("""glue""" ,"""mrpc""" ,split="""validation""" )
def tokenize_function(_lowerCAmelCase ):
A_ : Tuple = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=_lowerCAmelCase ,max_length=_lowerCAmelCase )
return outputs
with accelerator.main_process_first():
A_ : List[Any] = dataset.map(
_lowerCAmelCase ,batched=_lowerCAmelCase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
A_ : Union[str, Any] = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(_lowerCAmelCase ):
if use_longest:
return tokenizer.pad(_lowerCAmelCase ,padding="""longest""" ,return_tensors="""pt""" )
return tokenizer.pad(_lowerCAmelCase ,padding="""max_length""" ,max_length=1_2_8 ,return_tensors="""pt""" )
return DataLoader(_lowerCAmelCase ,shuffle=_lowerCAmelCase ,collate_fn=_lowerCAmelCase ,batch_size=1_6 )
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : int = Accelerator(dispatch_batches=_lowerCAmelCase ,split_batches=_lowerCAmelCase )
A_ : List[Any] = get_dataloader(_lowerCAmelCase ,not dispatch_batches )
A_ : Any = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" ,return_dict=_lowerCAmelCase )
A_ , A_ : int = accelerator.prepare(_lowerCAmelCase ,_lowerCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
A_ : Optional[Any] = []
for batch in dataloader:
A_ , A_ : List[str] = batch.values()
with torch.no_grad():
A_ : Tuple = model(_lowerCAmelCase )
A_ , A_ : List[str] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A_ , A_ : int = [], []
for logit, targ in logits_and_targets:
logits.append(_lowerCAmelCase )
targs.append(_lowerCAmelCase )
A_ , A_ : Optional[int] = torch.cat(_lowerCAmelCase ), torch.cat(_lowerCAmelCase )
return logits, targs
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase=8_2 ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=1_6 ):
'''simple docstring'''
A_ , A_ , A_ : Union[str, Any] = get_basic_setup(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
A_ , A_ : Union[str, Any] = generate_predictions(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
assert (
len(_lowerCAmelCase ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_lowerCAmelCase )}"""
def _lowerCAmelCase ( _lowerCAmelCase = False ,_lowerCAmelCase = False ):
'''simple docstring'''
A_ : Any = evaluate.load("""glue""" ,"""mrpc""" )
A_ , A_ : Optional[Any] = get_mrpc_setup(_lowerCAmelCase ,_lowerCAmelCase )
# First do baseline
A_ , A_ , A_ : Any = setup["""no"""]
model.to(_lowerCAmelCase )
model.eval()
for batch in dataloader:
batch.to(_lowerCAmelCase )
with torch.inference_mode():
A_ : List[str] = model(**_lowerCAmelCase )
A_ : int = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_lowerCAmelCase ,references=batch["""labels"""] )
A_ : List[str] = metric.compute()
# Then do distributed
A_ , A_ , A_ : int = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
A_ : Optional[Any] = model(**_lowerCAmelCase )
A_ : Any = outputs.logits.argmax(dim=-1 )
A_ : int = batch["""labels"""]
A_ , A_ : Optional[Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_lowerCAmelCase ,references=_lowerCAmelCase )
A_ : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def _lowerCAmelCase ( ):
'''simple docstring'''
A_ : str = Accelerator(split_batches=_lowerCAmelCase ,dispatch_batches=_lowerCAmelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_lowerCAmelCase ,_lowerCAmelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A_ : Optional[int] = Accelerator(split_batches=_lowerCAmelCase ,dispatch_batches=_lowerCAmelCase )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_lowerCAmelCase ,9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
A_ : Union[str, Any] = Accelerator()
test_torch_metrics(_lowerCAmelCase ,5_1_2 )
accelerator.state._reset_state()
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
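# A minimal, hedged sketch of the `gather_for_metrics` pattern this test file
# exercises (`gather_for_metrics` is the real accelerate API; the model, dataloader,
# and batch keys are placeholders):
#
#     import torch
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     model, dataloader = accelerator.prepare(model, dataloader)
#     for batch in dataloader:
#         with torch.no_grad():
#             logits = model(batch["x"])
#         # Drops the duplicated samples that were added to make batches divisible
#         logits, targets = accelerator.gather_for_metrics((logits, batch["y"]))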
| 569
| 1
|
'''simple docstring'''
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in n with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
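# Quick sanity check for str_eval (the digits of "3675356291" multiply to 1_020_600):
#
#     >>> str_eval("3675356291")
#     1020600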
| 709
|
"""A radix tree (compressed trie) implementation."""


class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge label to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
| 280
| 0
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__A : List[Any] = logging.get_logger(__name__)
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if "." in tensor_name:
_A = tensor_name.split('.' )
for split in splits[:-1]:
_A = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
_A = new_module
_A = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
_A = tensor_name in module._buffers
_A = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_4bit or is_8bit:
_A = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_A = old_value.to(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_A = value.to('cpu' )
if value.dtype == torch.inta:
_A = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
_A = torch.tensor(_SCREAMING_SNAKE_CASE , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , _SCREAMING_SNAKE_CASE ) and fpaa_statistics is None:
_A = new_value.T
_A = old_value.__dict__
if is_abit:
_A = bnb.nn.IntaParams(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
elif is_abit:
_A = bnb.nn.Paramsabit(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
_A = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(_SCREAMING_SNAKE_CASE ) )
else:
if value is None:
_A = old_value.to(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_A = value.to(_SCREAMING_SNAKE_CASE )
else:
_A = torch.tensor(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
if is_buffer:
_A = new_value
else:
_A = nn.Parameter(_SCREAMING_SNAKE_CASE , requires_grad=old_value.requires_grad )
_A = new_value
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> Dict:
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
_A = []
current_key_name.append(_SCREAMING_SNAKE_CASE )
if (isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) or isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(_SCREAMING_SNAKE_CASE ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A, _A = module.weight.shape
else:
_A = module.in_features
_A = module.out_features
if quantization_config.quantization_method() == "llm_int8":
_A = bnb.nn.LinearabitLt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
_A = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
_A = bnb.nn.Linearabit(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
_A = True
# Store the module class in case we need to transpose the weight later
_A = type(_SCREAMING_SNAKE_CASE )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_SCREAMING_SNAKE_CASE )
if len(list(module.children() ) ) > 0:
_A, _A = _replace_with_bnb_linear(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , has_been_replaced=_SCREAMING_SNAKE_CASE , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
"""simple docstring"""
_A = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
_A, _A = _replace_with_bnb_linear(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def __lowerCAmelCase( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , _SCREAMING_SNAKE_CASE , )
return replace_with_bnb_linear(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , _SCREAMING_SNAKE_CASE , )
return set_module_quantized_tensor_to_device(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_A = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
_A = find_tied_parameters(_SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_A = sum(_SCREAMING_SNAKE_CASE , [] )
_A = len(_SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
_A = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_A = list(model.named_children() )
_A = [list_modules[-1][0]]
# add last module together with tied weights
_A = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
_A = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
_A = ['.weight', '.bias']
_A = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_A = name.replace(_SCREAMING_SNAKE_CASE , '' )
filtered_module_names.append(_SCREAMING_SNAKE_CASE )
return filtered_module_names
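# Hedged usage note: `replace_with_bnb_linear` above is normally invoked for you by
# `from_pretrained` when quantized loading is requested, rather than called directly:
#
#     from transformers import AutoModelForCausalLM
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "facebook/opt-350m",  # placeholder checkpoint
#         load_in_8bit=True,
#         device_map="auto",
#     )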
| 27
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = '▁'
a_ = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
a_ = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
a_ = {
'facebook/m2m100_418M': 1_0_2_4,
}
# fmt: off
a_ = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = ["""input_ids""", """attention_mask"""]
snake_case_ = []
snake_case_ = []
def __init__( self : Dict , __lowercase : str , __lowercase : Any , __lowercase : Optional[Any]=None , __lowercase : int=None , __lowercase : Any="<s>" , __lowercase : str="</s>" , __lowercase : List[str]="</s>" , __lowercase : Union[str, Any]="<pad>" , __lowercase : Union[str, Any]="<unk>" , __lowercase : Dict="m2m100" , __lowercase : Optional[Dict[str, Any]] = None , __lowercase : Optional[Any]=8 , **__lowercase : List[str] , ) -> None:
SCREAMING_SNAKE_CASE__ : Optional[Any] ={} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE__ : Any =language_codes
SCREAMING_SNAKE_CASE__ : Optional[int] =FAIRSEQ_LANGUAGE_CODES[language_codes]
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
SCREAMING_SNAKE_CASE__ : str =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__lowercase )
for lang_code in fairseq_language_code
if self.get_lang_token(__lowercase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowercase , tgt_lang=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , language_codes=__lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowercase , **__lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_file
SCREAMING_SNAKE_CASE__ : Optional[Any] =load_json(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] ={v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ : List[str] =spm_file
SCREAMING_SNAKE_CASE__ : Optional[Any] =load_spm(__lowercase , self.sp_model_kwargs )
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(self.encoder )
SCREAMING_SNAKE_CASE__ : int ={
self.get_lang_token(__lowercase ): self.encoder_size + i for i, lang_code in enumerate(__lowercase )
}
SCREAMING_SNAKE_CASE__ : Optional[int] ={lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowercase )}
SCREAMING_SNAKE_CASE__ : Optional[Any] ={v: k for k, v in self.lang_token_to_id.items()}
SCREAMING_SNAKE_CASE__ : List[Any] =src_lang if src_lang is not None else '''en'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =tgt_lang
SCREAMING_SNAKE_CASE__ : str =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
SCREAMING_SNAKE_CASE__ : Optional[Any] =num_madeup_words
@property
def __magic_name__ ( self : str ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __magic_name__ ( self : Dict ) -> str:
return self._src_lang
@src_lang.setter
def __magic_name__ ( self : Optional[int] , __lowercase : str ) -> None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self : Any , __lowercase : str ) -> List[str]:
return self.sp_model.encode(__lowercase , out_type=__lowercase )
def __magic_name__ ( self : Any , __lowercase : int ) -> List[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__lowercase , self.encoder[self.unk_token] )
def __magic_name__ ( self : List[str] , __lowercase : int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__lowercase , self.unk_token )
def __magic_name__ ( self : Any , __lowercase : int ) -> Dict:
SCREAMING_SNAKE_CASE__ : int =[]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowercase ) + token
SCREAMING_SNAKE_CASE__ : int =[]
else:
current_sub_tokens.append(__lowercase )
out_string += self.sp_model.decode(__lowercase )
return out_string.strip()
def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None , __lowercase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : Optional[Any] =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowercase )) + suffix_ones
return prefix_ones + ([0] * len(__lowercase )) + ([0] * len(__lowercase )) + suffix_ones
def __magic_name__ ( self : Any , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict ={self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict =self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : Dict =None
return state
def __setstate__( self : Tuple , __lowercase : Dict ) -> None:
SCREAMING_SNAKE_CASE__ : int =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE__ : Optional[int] ={}
SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_spm(self.spm_file , self.sp_model_kwargs )
def __magic_name__ ( self : Optional[int] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ : List[Any] =Path(__lowercase )
if not save_dir.is_dir():
raise OSError(F"{save_directory} should be a directory" )
SCREAMING_SNAKE_CASE__ : Any =save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
SCREAMING_SNAKE_CASE__ : Any =save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __lowercase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __lowercase )
elif not os.path.isfile(self.spm_file ):
with open(__lowercase , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE__ : List[str] =self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (str(__lowercase ), str(__lowercase ))
def __magic_name__ ( self : Optional[int] , __lowercase : List[str] , __lowercase : str = "en" , __lowercase : Optional[List[str]] = None , __lowercase : str = "ro" , **__lowercase : Any , ) -> BatchEncoding:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =src_lang
SCREAMING_SNAKE_CASE__ : List[str] =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__lowercase , __lowercase , **__lowercase )
def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Optional[str] , __lowercase : Optional[str] , **__lowercase : List[Any] ) -> int:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
SCREAMING_SNAKE_CASE__ : Any =src_lang
SCREAMING_SNAKE_CASE__ : List[Any] =self(__lowercase , add_special_tokens=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =self.get_lang_id(__lowercase )
SCREAMING_SNAKE_CASE__ : str =tgt_lang_id
return inputs
def __magic_name__ ( self : Any ) -> Tuple:
self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self : Any ) -> Any:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self : Optional[Any] , __lowercase : str ) -> None:
SCREAMING_SNAKE_CASE__ : Dict =self.get_lang_token(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.lang_token_to_id[lang_token]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cur_lang_id]
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.eos_token_id]
def __magic_name__ ( self : Union[str, Any] , __lowercase : str ) -> None:
SCREAMING_SNAKE_CASE__ : List[Any] =self.get_lang_token(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =self.lang_token_to_id[lang_token]
SCREAMING_SNAKE_CASE__ : str =[self.cur_lang_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.eos_token_id]
def __magic_name__ ( self : Union[str, Any] , __lowercase : str ) -> str:
return self.lang_code_to_token[lang]
def __magic_name__ ( self : Any , __lowercase : str ) -> int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_lang_token(__lowercase )
return self.lang_token_to_id[lang_token]
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Dict[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =sentencepiece.SentencePieceProcessor(**UpperCamelCase__ )
spm.Load(str(UpperCamelCase__ ) )
return spm
def _a( UpperCamelCase__ : str ):
'''simple docstring'''
with open(UpperCamelCase__, '''r''' ) as f:
return json.load(UpperCamelCase__ )
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : str ):
'''simple docstring'''
with open(UpperCamelCase__, '''w''' ) as f:
json.dump(UpperCamelCase__, UpperCamelCase__, indent=2 )
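# A short, hedged usage sketch (M2M100Tokenizer and the 418M checkpoint are real;
# the sentence is a placeholder):
#
#     from transformers import M2M100Tokenizer
#
#     tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     batch = tok("Hello world", return_tensors="pt")
#     # At generation time the target language is forced via:
#     #     forced_bos_token_id=tok.get_lang_id("fr")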
| 296
| 0
|
'''simple docstring'''
def selection_sort(collection: list) -> list:
    """Sort a list in place using selection sort and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
__a = input('Enter numbers separated by a comma:\n').strip()
__a = [int(item) for item in user_input.split(',')]
print(selection_sort(unsorted))
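# Example with the restored function name:
#
#     >>> selection_sort([5, 2, 9, 1])
#     [1, 2, 5, 9]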
| 709
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__a = 'hf-internal-testing/tiny-random-bert'
__a = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
__a = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_UpperCAmelCase : int = cached_file(lowerCAmelCase__ , lowerCAmelCase__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(lowerCAmelCase__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ) )
with open(os.path.join(lowerCAmelCase__ , "refs" , "main" ) ) as f:
_UpperCAmelCase : int = f.read()
self.assertEqual(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , "snapshots" , lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertTrue(os.path.isfile(lowerCAmelCase__ ) )
# File is cached at the same place the second time.
_UpperCAmelCase : Dict = cached_file(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Using a specific revision to test the full commit hash.
_UpperCAmelCase : Optional[int] = cached_file(lowerCAmelCase__ , lowerCAmelCase__ , revision="9b8c223" )
self.assertEqual(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , "snapshots" , lowerCAmelCase__ , lowerCAmelCase__ ) )
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid model identifier" ):
_UpperCAmelCase : Any = cached_file("tiny-random-bert" , lowerCAmelCase__ )
with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid git identifier" ):
_UpperCAmelCase : List[Any] = cached_file(lowerCAmelCase__ , lowerCAmelCase__ , revision="aaaa" )
with self.assertRaisesRegex(lowerCAmelCase__ , "does not appear to have a file named" ):
_UpperCAmelCase : Union[str, Any] = cached_file(lowerCAmelCase__ , "conf" )
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(lowerCAmelCase__ , "does not appear to have a file named" ):
_UpperCAmelCase : Dict = cached_file(lowerCAmelCase__ , "conf" )
with open(os.path.join(lowerCAmelCase__ , "refs" , "main" ) ) as f:
_UpperCAmelCase : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase__ , ".no_exist" , lowerCAmelCase__ , "conf" ) ) )
_UpperCAmelCase : Optional[int] = cached_file(lowerCAmelCase__ , "conf" , _raise_exceptions_for_missing_entries=lowerCAmelCase__ )
self.assertIsNone(lowerCAmelCase__ )
_UpperCAmelCase : str = cached_file(lowerCAmelCase__ , "conf" , local_files_only=lowerCAmelCase__ , _raise_exceptions_for_missing_entries=lowerCAmelCase__ )
self.assertIsNone(lowerCAmelCase__ )
_UpperCAmelCase : Any = mock.Mock()
_UpperCAmelCase : str = 5_0_0
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Tuple = HTTPError
_UpperCAmelCase : Optional[int] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
_UpperCAmelCase : Optional[Any] = cached_file(lowerCAmelCase__ , "conf" , _raise_exceptions_for_connection_errors=lowerCAmelCase__ )
self.assertIsNone(lowerCAmelCase__ )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase__ ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase__ ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase__ ) )
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , lowerCAmelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , lowerCAmelCase__ , revision="ahaha" )
_UpperCAmelCase : Optional[Any] = get_file_from_repo("bert-base-cased" , lowerCAmelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
_UpperCAmelCase : List[Any] = json.loads(open(lowerCAmelCase__ , "r" ).read() )
self.assertEqual(config["hidden_size"] , 7_6_8 )
def _lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Union[str, Any] = Path(lowerCAmelCase__ ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(lowerCAmelCase__ , "a.txt" ) , str(lowerCAmelCase__ ) )
self.assertIsNone(get_file_from_repo(lowerCAmelCase__ , "b.txt" ) )
| 257
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : str = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 255
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = '''▁'''
UpperCAmelCase_ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
UpperCAmelCase_ : List[Any] = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
UpperCAmelCase_ : Any = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
UpperCAmelCase_ : Optional[Any] = {
'''ernie-m-base''': 5_1_4,
'''ernie-m-large''': 5_1_4,
}
UpperCAmelCase_ : List[Any] = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ErnieM tokenizer, based on SentencePiece."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
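# Minimal usage sketch (assumes local `sentencepiece.bpe.model` and `vocab.txt` files; paths are illustrative):
# tokenizer = ErnieMTokenizer(sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt")
# tokenizer.tokenize("Hello world")  # SentencePiece pieces, further split around CJK characters and digits by _tokenize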
| 255
| 1
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
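# Usage sketch (values illustrative, not defaults): these arguments plug into Seq2SeqTrainer;
# the `generation_*` fields only take effect when `predict_with_generate=True`:
# args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_num_beams=4)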
| 721
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
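# Example: the word value of "SKY" is 19 + 11 + 25 = 55, and 55 = t_10 is triangular, so "SKY" counts.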
def solution() -> int:
    """Count how many words in words.txt have a triangular alphabetical value (A=1, ..., Z=26)."""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    # Strip quotes and split the single comma-separated line into words
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    # Keep only the word values that are triangular numbers
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 331
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
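# Standalone sketch (outside the test suite; model id is the tiny test model used above):
# the same API benchmarks a model directly:
# args = TensorFlowBenchmarkArguments(models=["sshleifer/tiny-gpt2"], inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
# results = TensorFlowBenchmark(args).run()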
| 123
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer, backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
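# Usage sketch (model id is real; output shape is the general BERT-style pattern, not a verified trace):
# tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
# tok("hello world")["input_ids"]  # [CLS] ... [SEP] ids; token_type_ids are all 0 for a single sequence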
| 522
| 0
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command with the transformers-cli argument parser."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.", )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels." )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts." )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids." )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)." )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.", )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on." )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model." )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
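# Example invocation (file paths are hypothetical):
# transformers-cli train --train_data ./train.csv --column_label 0 --column_text 1 --output ./trained_model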
| 365
|
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform max pooling with the given window size and stride on a square matrix."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
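# Worked example for maxpooling (values verified by hand): a 4x4 input with size=2, stride=2
# takes the max of each non-overlapping 2x2 window:
# >>> maxpooling(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]), 2, 2)
# array([[ 6.,  8.],
#        [14., 16.]])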
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform average pooling with the given window size and stride on a square matrix."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
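# Worked example for avgpooling (values verified by hand): each 2x2 window mean is truncated by int(),
# e.g. mean(1, 2, 5, 6) = 3.5 -> 3:
# >>> avgpooling(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]), 2, 2)
# array([[ 3.,  5.],
#        [11., 13.]])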
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 365
| 1
|
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset whose length is random (it stops with probability p_stop per item).
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
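# Usage sketch (variable names illustrative): `skip_first_batches` is the hook used to resume a
# run mid-epoch after loading a checkpoint, e.g.
# dataloader = skip_first_batches(dataloader, num_batches=batches_already_done)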
| 585
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 585
| 1
|
"""simple docstring"""
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 573
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__lowerCAmelCase = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__lowerCAmelCase = tokenizer.encode([special_token] , add_special_tokens=snake_case_ )
self.assertEqual(len(snake_case_ ) , 1 )
__lowerCAmelCase = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
self.assertTrue(special_token not in decoded )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__lowerCAmelCase , __lowerCAmelCase = self.get_input_output_texts(snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertNotEqual(len(snake_case_ ) , 0 )
__lowerCAmelCase = tokenizer.decode(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(text_a.replace(""" """ , """""" ) , snake_case_ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def A__ ( self ) -> Any:
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def A__ ( self ) -> List[Any]:
pass
| 573
| 1
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
set_seed(7_70)
lowerCAmelCase = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
lowerCAmelCase = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
lowerCAmelCase = os.path.dirname(os.path.abspath(__file__))
lowerCAmelCase = os.path.join(os.path.expanduser("""~"""), """.cache""")
lowerCAmelCase = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str=False ) ->Tuple:
lowerCamelCase__ : List[str] =model_type
if use_small:
key += "_small"
return os.path.join(snake_case_ , REMOTE_MODEL_PATHS[key]['file_name'] )
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : str ) ->Union[str, Any]:
os.makedirs(snake_case_ , exist_ok=snake_case_ )
hf_hub_download(repo_id=snake_case_ , filename=snake_case_ , local_dir=snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : List[str]=False , snake_case_ : Any="text" ) ->List[Any]:
if model_type == "text":
lowerCamelCase__ : Tuple =BarkSemanticModel
lowerCamelCase__ : str =BarkSemanticConfig
lowerCamelCase__ : str =BarkSemanticGenerationConfig
elif model_type == "coarse":
lowerCamelCase__ : Optional[Any] =BarkCoarseModel
lowerCamelCase__ : List[Any] =BarkCoarseConfig
lowerCamelCase__ : Union[str, Any] =BarkCoarseGenerationConfig
elif model_type == "fine":
lowerCamelCase__ : int =BarkFineModel
lowerCamelCase__ : Dict =BarkFineConfig
lowerCamelCase__ : Union[str, Any] =BarkFineGenerationConfig
else:
raise NotImplementedError()
lowerCamelCase__ : int =f"""{model_type}_small""" if use_small else model_type
lowerCamelCase__ : int =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(snake_case_ ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info['repo_id'] , model_info['file_name'] )
lowerCamelCase__ : List[str] =torch.load(snake_case_ , map_location=snake_case_ )
# this is a hack
lowerCamelCase__ : Union[str, Any] =checkpoint['model_args']
if "input_vocab_size" not in model_args:
lowerCamelCase__ : Tuple =model_args['vocab_size']
lowerCamelCase__ : Optional[Any] =model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowerCamelCase__ : str =model_args.pop('n_head' )
lowerCamelCase__ : List[Any] =model_args.pop('n_embd' )
lowerCamelCase__ : Any =model_args.pop('n_layer' )
lowerCamelCase__ : List[str] =ConfigClass(**checkpoint['model_args'] )
lowerCamelCase__ : str =ModelClass(config=snake_case_ )
lowerCamelCase__ : Optional[int] =GenerationConfigClass()
lowerCamelCase__ : Optional[Any] =model_generation_config
lowerCamelCase__ : Optional[Any] =checkpoint['model']
# fixup checkpoint
lowerCamelCase__ : List[Any] ='_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(snake_case_ ):
            # replace part of the key with the corresponding layer name in the HF implementation
lowerCamelCase__ : int =k[len(snake_case_ ) :]
for old_layer_name in new_layer_name_dict:
lowerCamelCase__ : Dict =new_k.replace(snake_case_ , new_layer_name_dict[old_layer_name] )
lowerCamelCase__ : Union[str, Any] =state_dict.pop(snake_case_ )
lowerCamelCase__ : Optional[Any] =set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCamelCase__ : Tuple ={k for k in extra_keys if not k.endswith('.attn.bias' )}
lowerCamelCase__ : Any =set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCamelCase__ : Any ={k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(snake_case_ ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(snake_case_ ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(snake_case_ , strict=snake_case_ )
lowerCamelCase__ : str =model.num_parameters(exclude_embeddings=snake_case_ )
lowerCamelCase__ : Union[str, Any] =checkpoint['best_val_loss'].item()
logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(snake_case_ , 3 )} loss""" )
model.eval()
model.to(snake_case_ )
del checkpoint, state_dict
return model
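# Hedged illustration of the state-dict fixup loop above. The key below is
# hypothetical (not read from a real Bark checkpoint); it shows how the
# "_orig_mod." prefix is stripped and how the old->new layer-name mapping
# rewrites the remaining pieces, using a small excerpt of the dict defined
# at the top of this script.
def _rename_key_example():
    rename = {"c_attn": "att_proj", "transformer.": "", "h.": "layers."}
    key = "_orig_mod.transformer.h.0.c_attn.weight"
    new_key = key[len("_orig_mod."):]
    for old_layer_name in rename:
        new_key = new_key.replace(old_layer_name, rename[old_layer_name])
    return new_key  # "layers.0.att_proj.weight"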
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : str=False , snake_case_ : int="text" ) ->Tuple:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCamelCase__ : Dict ='cpu' # do conversion on cpu
lowerCamelCase__ : Union[str, Any] =_get_ckpt_path(snake_case_ , use_small=snake_case_ )
lowerCamelCase__ : Union[str, Any] =_load_model(snake_case_ , snake_case_ , model_type=snake_case_ , use_small=snake_case_ )
# load bark initial model
lowerCamelCase__ : Optional[Any] =_bark_load_model(snake_case_ , 'cpu' , model_type=snake_case_ , use_small=snake_case_ )
if model_type == "text":
lowerCamelCase__ : List[Any] =bark_model['model']
if model.num_parameters(exclude_embeddings=snake_case_ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
lowerCamelCase__ : int =5
lowerCamelCase__ : Optional[int] =1_0
if model_type in ["text", "coarse"]:
lowerCamelCase__ : Tuple =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
lowerCamelCase__ : Optional[Any] =bark_model(snake_case_ )[0]
lowerCamelCase__ : Tuple =model(snake_case_ )
# take last logits
lowerCamelCase__ : Union[str, Any] =output_new_model_total.logits[:, [-1], :]
else:
lowerCamelCase__ : Union[str, Any] =3
lowerCamelCase__ : Tuple =8
lowerCamelCase__ : Union[str, Any] =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowerCamelCase__ : List[Any] =model(snake_case_ , snake_case_ )
lowerCamelCase__ : int =bark_model(snake_case_ , snake_case_ )
lowerCamelCase__ : str =output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : List[str] , ) ->str:
lowerCamelCase__ : Optional[int] =os.path.join(snake_case_ , snake_case_ )
lowerCamelCase__ : Tuple =BarkSemanticConfig.from_pretrained(os.path.join(snake_case_ , 'config.json' ) )
lowerCamelCase__ : Optional[int] =BarkCoarseConfig.from_pretrained(os.path.join(snake_case_ , 'config.json' ) )
lowerCamelCase__ : Optional[Any] =BarkFineConfig.from_pretrained(os.path.join(snake_case_ , 'config.json' ) )
lowerCamelCase__ : Dict =EncodecConfig.from_pretrained('facebook/encodec_24khz' )
lowerCamelCase__ : Union[str, Any] =BarkSemanticModel.from_pretrained(snake_case_ )
lowerCamelCase__ : Tuple =BarkCoarseModel.from_pretrained(snake_case_ )
lowerCamelCase__ : Optional[Any] =BarkFineModel.from_pretrained(snake_case_ )
lowerCamelCase__ : int =EncodecModel.from_pretrained('facebook/encodec_24khz' )
lowerCamelCase__ : Optional[Any] =BarkConfig.from_sub_model_configs(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
lowerCamelCase__ : Optional[Any] =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowerCamelCase__ : List[Any] =BarkModel(snake_case_ )
lowerCamelCase__ : Union[str, Any] =semantic
lowerCamelCase__ : Dict =coarseAcoustic
lowerCamelCase__ : int =fineAcoustic
lowerCamelCase__ : List[Any] =codec
lowerCamelCase__ : int =bark_generation_config
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
bark.save_pretrained(snake_case_ , repo_id=snake_case_ , push_to_hub=snake_case_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
lowerCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
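# Example invocation (hedged: the script filename and output directory are
# placeholders; model_type must be one of text/coarse/fine):
#   python convert_bark_checkpoint.py text ./bark-text-small --is_small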
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Tuple ) ->Dict:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
lowerCamelCase__ : Optional[Any] =TOKENIZER_CLASSES
else:
lowerCamelCase__ : Any ={tokenizer_name: getattr(snake_case_ , tokenizer_name + 'Fast' )}
logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
lowerCamelCase__ : Any =TOKENIZER_CLASSES[tokenizer_name]
lowerCamelCase__ : List[Any] =True
if checkpoint_name is None:
lowerCamelCase__ : Union[str, Any] =list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowerCamelCase__ : Optional[Any] =[checkpoint_name]
logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
lowerCamelCase__ : Dict =tokenizer_class.from_pretrained(snake_case_ , force_download=snake_case_ )
# Save fast tokenizer
logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCamelCase__ , lowerCamelCase__ : List[str] =checkpoint.split('/' )
lowerCamelCase__ : Optional[int] =os.path.join(snake_case_ , snake_case_ )
elif add_prefix:
lowerCamelCase__ : Union[str, Any] =checkpoint
lowerCamelCase__ : List[Any] =dump_path
else:
lowerCamelCase__ : str =None
lowerCamelCase__ : Dict =dump_path
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowerCamelCase__ : int =list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowerCamelCase__ : Union[str, Any] =file_path.split(snake_case_ )[-1][0]
if next_char == "/":
lowerCamelCase__ : Optional[int] =os.path.join(snake_case_ , snake_case_ )
lowerCamelCase__ : int =None
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
lowerCamelCase__ : Optional[Any] =tokenizer.save_pretrained(
snake_case_ , legacy_format=snake_case_ , filename_prefix=snake_case_ )
logger.info(f"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(snake_case_ )
logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
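# Example invocation (hedged: the script filename and dump path are
# placeholders; --tokenizer_name must be a key of TOKENIZER_CLASSES):
#   python convert_slow_tokenizers.py --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased --dump_path ./fast-tokenizers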
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( __A):
def A ( self : Tuple ) -> str:
UpperCAmelCase_ : Optional[int] = SMALL_MODEL_IDENTIFIER
UpperCAmelCase_ : int = '''pt'''
UpperCAmelCase_ : Tuple = '''tf'''
def A ( self : Any , _A : Optional[int] ) -> int:
UpperCAmelCase_ : List[Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_A )
def A ( self : List[str] , _A : Optional[Any] ) -> int:
UpperCAmelCase_ : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=_A )
model_tf.save_pretrained(_A )
def A ( self : List[Any] ) -> Dict:
UpperCAmelCase_ : Optional[Any] = '''mock_framework'''
# Framework provided - return whatever the user provides
UpperCAmelCase_ : Optional[int] = FeaturesManager.determine_framework(self.test_model , _A )
self.assertEqual(_A , _A )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_A )
UpperCAmelCase_ : Union[str, Any] = FeaturesManager.determine_framework(_A , _A )
self.assertEqual(_A , _A )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_A )
UpperCAmelCase_ : Optional[Any] = FeaturesManager.determine_framework(_A , _A )
self.assertEqual(_A , _A )
def A ( self : int ) -> List[str]:
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_A )
UpperCAmelCase_ : List[str] = FeaturesManager.determine_framework(_A )
self.assertEqual(_A , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_A )
UpperCAmelCase_ : Optional[Any] = FeaturesManager.determine_framework(_A )
self.assertEqual(_A , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_A ):
UpperCAmelCase_ : Union[str, Any] = FeaturesManager.determine_framework(_A )
def A ( self : List[Any] ) -> Tuple:
UpperCAmelCase_ : List[str] = MagicMock(return_value=_A )
with patch('''transformers.onnx.features.is_tf_available''' , _A ):
UpperCAmelCase_ : List[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_A , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
UpperCAmelCase_ : Optional[int] = MagicMock(return_value=_A )
with patch('''transformers.onnx.features.is_torch_available''' , _A ):
UpperCAmelCase_ : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_A , self.framework_tf )
# Both in environment -> use PyTorch
UpperCAmelCase_ : List[str] = MagicMock(return_value=_A )
UpperCAmelCase_ : List[str] = MagicMock(return_value=_A )
with patch('''transformers.onnx.features.is_tf_available''' , _A ), patch(
'''transformers.onnx.features.is_torch_available''' , _A ):
UpperCAmelCase_ : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_A , self.framework_pt )
# Both not in environment -> raise error
UpperCAmelCase_ : str = MagicMock(return_value=_A )
UpperCAmelCase_ : str = MagicMock(return_value=_A )
with patch('''transformers.onnx.features.is_tf_available''' , _A ), patch(
'''transformers.onnx.features.is_torch_available''' , _A ):
with self.assertRaises(_A ):
UpperCAmelCase_ : List[str] = FeaturesManager.determine_framework(self.test_model )
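# Hedged usage sketch (not part of the test class above; the model id is a
# placeholder for any hub checkpoint and network access is assumed):
def _determine_framework_sketch():
    from transformers.onnx import FeaturesManager
    # returns "pt" when torch is installed, "tf" when only TensorFlow is
    return FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")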
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def __UpperCAmelCase ( A : np.ndarray ) -> np.ndarray:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def __UpperCAmelCase ( A : np.ndarray ) -> np.ndarray:
return (gray > 1_2_7) & (gray <= 2_5_5)
def __UpperCAmelCase ( A : np.ndarray , A : np.ndarray ) -> np.ndarray:
UpperCAmelCase_ : List[Any] = np.zeros_like(A )
UpperCAmelCase_ : Dict = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
UpperCAmelCase_ : Optional[Any] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
UpperCAmelCase_ : List[Any] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
UpperCAmelCase_ : List[Any] = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
_UpperCamelCase : str = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
_UpperCamelCase : List[Any] = np.array(Image.open(lena_path))
# kernel to be applied
_UpperCamelCase : int = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
_UpperCamelCase : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
_UpperCamelCase : int = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
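# Hedged worked example, self-contained and independent of lena.jpg: dilating
# a single foreground pixel with the 3x3 cross element used above grows it
# into a plus shape, because dilation fires at every position whose kernel
# window overlaps a foreground pixel. The loop restates the logic of the
# dilation function with explicit names.
def _dilation_sketch():
    import numpy as np
    image = np.zeros((5, 5), dtype=int)
    image[2, 2] = 1
    kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    pad_y, pad_x = kernel.shape[0] // 2, kernel.shape[1] // 2
    padded = np.pad(image, ((pad_y, pad_y), (pad_x, pad_x)))
    output = np.zeros_like(image)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            window = padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            output[y, x] = int((kernel * window).sum() > 0)
    return output  # the centre pixel plus its 4-neighbourhood are set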
'''simple docstring'''
def __snake_case ( lowercase : Optional[Any] ):
snake_case_ = len(lowercase )
snake_case_ = sum(lowercase )
snake_case_ = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
snake_case_ = True
for i in range(1 , s + 1 ):
snake_case_ = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
snake_case_ = dp[i][j - 1]
if arr[i - 1] <= j:
snake_case_ = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
snake_case_ = s - 2 * j
break
return diff
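# Hedged, self-contained sketch of the same idea as the function above: find
# the largest achievable subset sum j <= s // 2; the minimum difference
# between the two halves of the partition is then s - 2 * j.
def _min_partition_diff(arr):
    s = sum(arr)
    reachable = {0}
    for a in arr:
        reachable |= {r + a for r in reachable}
    best = max(j for j in reachable if j <= s // 2)
    return s - 2 * best
# _min_partition_diff([1, 6, 11, 5]) == 1  ({1, 5, 6} vs {11})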
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , UpperCAmelCase_ , ):
snake_case_ = parent
snake_case_ = 13
snake_case_ = 7
snake_case_ = True
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = 99
snake_case_ = 32
snake_case_ = 2
snake_case_ = 4
snake_case_ = 37
snake_case_ = "gelu"
snake_case_ = 0.1
snake_case_ = 0.1
snake_case_ = 5_12
snake_case_ = 16
snake_case_ = 2
snake_case_ = 0.02
snake_case_ = 3
snake_case_ = 4
snake_case_ = None
def _lowercase ( self ):
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = TFDistilBertModel(config=UpperCAmelCase_ )
snake_case_ = {"input_ids": input_ids, "attention_mask": input_mask}
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = TFDistilBertForMaskedLM(config=UpperCAmelCase_ )
snake_case_ = {"input_ids": input_ids, "attention_mask": input_mask}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = TFDistilBertForQuestionAnswering(config=UpperCAmelCase_ )
snake_case_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = self.num_labels
snake_case_ = TFDistilBertForSequenceClassification(UpperCAmelCase_ )
snake_case_ = {"input_ids": input_ids, "attention_mask": input_mask}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = self.num_choices
snake_case_ = TFDistilBertForMultipleChoice(UpperCAmelCase_ )
snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = self.num_labels
snake_case_ = TFDistilBertForTokenClassification(UpperCAmelCase_ )
snake_case_ = {"input_ids": input_ids, "attention_mask": input_mask}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self ):
snake_case_ = self.prepare_config_and_inputs()
((snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_)) = config_and_inputs
snake_case_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
snake_case = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
snake_case = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case = False
snake_case = False
def _lowercase ( self ):
snake_case_ = TFDistilBertModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , dim=37 )
def _lowercase ( self ):
self.config_tester.run_common_tests()
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase_ )
@slow
def _lowercase ( self ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
snake_case_ = TFDistilBertModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self ):
snake_case_ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
snake_case_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(UpperCAmelCase_ )[0]
snake_case_ = [1, 6, 7_68]
self.assertEqual(output.shape , UpperCAmelCase_ )
snake_case_ = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1e-4 )
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCAmelCase__ : str = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def __lowercase ( _A , _A ) -> List[str]:
inspect_dataset(_A , _A )
SCREAMING_SNAKE_CASE : Union[str, Any] = path + """.py"""
assert script_name in os.listdir(_A )
assert "__pycache__" not in os.listdir(_A )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def __lowercase ( _A , _A ) -> Optional[Any]:
inspect_metric(_A , _A )
SCREAMING_SNAKE_CASE : Optional[int] = path + """.py"""
assert script_name in os.listdir(_A )
assert "__pycache__" not in os.listdir(_A )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def __lowercase ( _A , _A , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : Dict = get_dataset_config_info(_A , config_name=_A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def __lowercase ( _A , _A , _A ) -> List[str]:
with pytest.raises(_A ):
get_dataset_config_info(_A , config_name=_A )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def __lowercase ( _A , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Dict = get_dataset_config_names(_A )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def __lowercase ( _A , _A , _A ) -> List[str]:
SCREAMING_SNAKE_CASE : Optional[int] = get_dataset_infos(_A )
assert list(infos.keys() ) == expected_configs
SCREAMING_SNAKE_CASE : Any = expected_configs[0]
assert expected_config in infos
SCREAMING_SNAKE_CASE : Tuple = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def __lowercase ( _A , _A , _A ) -> Dict:
SCREAMING_SNAKE_CASE : Tuple = get_dataset_infos(_A )
assert expected_config in infos
SCREAMING_SNAKE_CASE : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def __lowercase ( _A , _A , _A ) -> Optional[Any]:
with pytest.raises(_A ):
get_dataset_split_names(_A , config_name=_A )
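# Hedged usage sketch of the inspection helpers exercised above ("squad" is
# one of the datasets already used in these tests; network access is assumed):
def _inspect_sketch():
    from datasets import get_dataset_config_names, get_dataset_split_names
    configs = get_dataset_config_names("squad")              # ["plain_text"]
    splits = get_dataset_split_names("squad", "plain_text")  # ["train", "validation"]
    return configs, splits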
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = False
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
UpperCAmelCase__ : List[str] = parser.parse_args()
UpperCAmelCase__ : Dict = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
UpperCAmelCase__ : str = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
UpperCAmelCase__ : Union[str, Any] = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
UpperCAmelCase__ : Union[str, Any] = reader.read()
UpperCAmelCase__ : Any = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
UpperCAmelCase__ : int = UNetaDModel(**config)
else:
UpperCAmelCase__ : Optional[Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
UpperCAmelCase__ : int = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
UpperCAmelCase__ : Optional[Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
UpperCAmelCase__ : str = config[key]
del config[key]
UpperCAmelCase__ : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
UpperCAmelCase__ : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
UpperCAmelCase__ : Dict = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
UpperCAmelCase__ : Optional[Any] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
UpperCAmelCase__ : Union[str, Any] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
UpperCAmelCase__ : List[Any] = param_value
UpperCAmelCase__ : List[str] = True
if not has_changed:
UpperCAmelCase__ : Any = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
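# Example invocation (hedged; the script filename and repo path are
# placeholders): reads config.json from --repo_path (or its unet subfolder),
# applies the key renamings above, and writes the updated config/weights back
# under --repo_path.
#   python convert_unet_checkpoint.py --repo_path ./old-unet-repo --dump_path ./converted-unet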
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: int , a: List[str] , a: Optional[Any] , a: int ):
__lowerCamelCase : List[Any] = dataset
__lowerCamelCase : List[Any] = process
__lowerCamelCase : Optional[int] = params
def __len__( self: Dict ):
return len(self.dataset )
def __getitem__( self: Optional[int] , a: Tuple ):
__lowerCamelCase : List[str] = self.dataset[i]
__lowerCamelCase : Optional[Any] = self.process(__lowerCAmelCase , **self.params )
return processed
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: str , a: Optional[int] , a: Tuple , a: Any , a: str=None ):
__lowerCamelCase : Any = loader
__lowerCamelCase : Optional[Any] = infer
__lowerCamelCase : List[str] = params
if loader_batch_size == 1:
            # Let's spare some time by deactivating loader-batch unrolling altogether
__lowerCamelCase : List[str] = None
__lowerCamelCase : Tuple = loader_batch_size
# Internal bookkeeping
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[Any] = None
def __len__( self: Optional[Any] ):
return len(self.loader )
def __iter__( self: Union[str, Any] ):
__lowerCamelCase : int = iter(self.loader )
return self
def _snake_case ( self: Any ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
__lowerCamelCase : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
__lowerCamelCase : int = {}
for k, element in self._loader_batch_data.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
# Convert ModelOutput to tuple first
__lowerCamelCase : Any = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
__lowerCamelCase : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__lowerCamelCase : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__lowerCAmelCase , __lowerCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
__lowerCamelCase : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__lowerCamelCase : List[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
__lowerCamelCase : Any = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take the correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
__lowerCamelCase : str = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take the correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
__lowerCamelCase : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
__lowerCamelCase : Optional[int] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
__lowerCamelCase : Union[str, Any] = self._loader_batch_data.__class__(__lowerCAmelCase )
self._loader_batch_index += 1
return result
def _snake_case ( self: List[Any] ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
__lowerCamelCase : Dict = next(self.iterator )
__lowerCamelCase : Optional[Any] = self.infer(__lowerCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(__lowerCAmelCase , torch.Tensor ):
__lowerCamelCase : Optional[Any] = processed
else:
__lowerCamelCase : List[str] = list(processed.keys() )[0]
__lowerCamelCase : List[str] = processed[key]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__lowerCamelCase : Tuple = len(__lowerCAmelCase )
else:
__lowerCamelCase : Optional[Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__lowerCamelCase : int = observed_batch_size
# Setting internal index to unwrap the batch
__lowerCamelCase : Optional[Any] = processed
__lowerCamelCase : Optional[int] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Optional[Any] , a: Union[str, Any] , a: Optional[Any] , a: Dict , a: List[Any]=None ):
super().__init__(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __iter__( self: str ):
__lowerCamelCase : str = iter(self.loader )
__lowerCamelCase : int = None
return self
def _snake_case ( self: str ):
if self.subiterator is None:
__lowerCamelCase : Tuple = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
__lowerCamelCase : List[Any] = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
__lowerCamelCase : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
__lowerCamelCase : int = next(self.subiterator )
return processed
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __iter__( self: str ):
__lowerCamelCase : List[str] = iter(self.loader )
return self
def _snake_case ( self: List[str] ):
        # Extremely similar to PipelineIterator in its unpacking mechanism.
        # BUT, we have an extra required item, which is the presence of `is_last`:
        # because everything is flattened by `PipelineChunkIterator`, we
        # need to keep track of how to regroup here, in the original `process`
        # boundaries, so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Any = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
__lowerCamelCase : Optional[Any] = self.loader_batch_item()
__lowerCamelCase : Union[str, Any] = item.pop('is_last' )
accumulator.append(__lowerCAmelCase )
if is_last:
return accumulator
while not is_last:
__lowerCamelCase : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(__lowerCAmelCase , torch.Tensor ):
__lowerCamelCase : List[str] = processed
else:
__lowerCamelCase : Optional[Any] = list(processed.keys() )[0]
__lowerCamelCase : List[str] = processed[key]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
else:
__lowerCamelCase : Any = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__lowerCamelCase : List[Any] = observed_batch_size
__lowerCamelCase : Dict = processed
__lowerCamelCase : Any = 0
while self._loader_batch_index < self.loader_batch_size:
__lowerCamelCase : str = self.loader_batch_item()
__lowerCamelCase : int = item.pop('is_last' )
accumulator.append(__lowerCAmelCase )
if is_last:
return accumulator
else:
__lowerCamelCase : int = processed
__lowerCamelCase : Union[str, Any] = item.pop('is_last' )
accumulator.append(__lowerCAmelCase )
return accumulator
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Tuple , a: str , a: Union[str, Any] ):
__lowerCamelCase : Optional[int] = dataset
__lowerCamelCase : Optional[Any] = key
def __len__( self: Any ):
return len(self.dataset )
def __getitem__( self: str , a: List[Any] ):
return self.dataset[i][self.key]
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Dict , a: Any , a: Any , a: List[str] ):
__lowerCamelCase : Union[str, Any] = dataset
__lowerCamelCase : Optional[int] = keya
__lowerCamelCase : Optional[int] = keya
def __len__( self: Any ):
return len(self.dataset )
def __getitem__( self: Any , a: str ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
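# Hedged sketch of the loader-batch unrolling idea implemented above, reduced
# to plain tensors: a DataLoader yields batches of size 4 and the generator
# re-emits them one element at a time, each with a leading batch dim of 1.
def _unroll_sketch():
    import torch
    from torch.utils.data import DataLoader
    data = torch.arange(8).reshape(8, 1)
    for batch in DataLoader(data, batch_size=4):
        for i in range(batch.shape[0]):
            yield batch[i].unsqueeze(0)  # shape (1, 1): looks like batch_size=1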
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__: Any = logging.get_logger(__name__)
lowerCAmelCase__: Union[str, Any] = "https://openaipublic.azureedge.net/jukebox/models/"
lowerCAmelCase__: Union[str, Any] = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Tuple:
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
SCREAMING_SNAKE_CASE_ : Dict = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
SCREAMING_SNAKE_CASE_ : List[str] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
SCREAMING_SNAKE_CASE_ : Optional[int] = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
SCREAMING_SNAKE_CASE_ : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
SCREAMING_SNAKE_CASE_ : Any = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
SCREAMING_SNAKE_CASE_ : List[Any] = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
SCREAMING_SNAKE_CASE_ : str = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
SCREAMING_SNAKE_CASE_ : List[str] = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
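# Hedged illustration (the keys are hypothetical, not read from a real
# checkpoint): the helper above maps Jukebox parameter names onto the HF
# layout, e.g. "prime_state_ln.weight" -> "encoder.final_layer_norm.weight"
# and "bottleneck.level_blocks.0.k" -> "bottleneck.level_blocks.0.codebook".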
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
import re
SCREAMING_SNAKE_CASE_ : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
SCREAMING_SNAKE_CASE_ : Dict = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
SCREAMING_SNAKE_CASE_ : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Any = re_encoder_block_conv_in.match(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = regex_match.groups()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(groups[2] ) * 2 + int(groups[3] )
SCREAMING_SNAKE_CASE_ : List[str] = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
SCREAMING_SNAKE_CASE_ : Any = re_encoder_block_conv_in.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Any = re_encoder_block_resnet.match(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = regex_match.groups()
SCREAMING_SNAKE_CASE_ : int = int(groups[2] ) * 2 + int(groups[3] )
SCREAMING_SNAKE_CASE_ : Tuple = {'1': 1, '3': 2}[groups[-2]]
SCREAMING_SNAKE_CASE_ : int = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
SCREAMING_SNAKE_CASE_ : Optional[Any] = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
SCREAMING_SNAKE_CASE_ : int = prefix + resnet_block
SCREAMING_SNAKE_CASE_ : Dict = re_encoder_block_resnet.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Dict = re_encoder_block_proj_out.match(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = regex_match.groups()
SCREAMING_SNAKE_CASE_ : Tuple = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
SCREAMING_SNAKE_CASE_ : Optional[Any] = re_encoder_block_proj_out.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : int = re_decoder_block_conv_out.match(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = regex_match.groups()
SCREAMING_SNAKE_CASE_ : List[str] = int(groups[2] ) * 2 + int(groups[3] ) - 2
SCREAMING_SNAKE_CASE_ : Dict = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
SCREAMING_SNAKE_CASE_ : str = re_decoder_block_conv_out.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : str = re_decoder_block_resnet.match(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = regex_match.groups()
SCREAMING_SNAKE_CASE_ : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
SCREAMING_SNAKE_CASE_ : Tuple = {'1': 1, '3': 2}[groups[-2]]
SCREAMING_SNAKE_CASE_ : str = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
SCREAMING_SNAKE_CASE_ : Optional[int] = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prefix + resnet_block
SCREAMING_SNAKE_CASE_ : str = re_decoder_block_resnet.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Optional[int] = re_decoder_block_proj_in.match(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = regex_match.groups()
SCREAMING_SNAKE_CASE_ : int = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
SCREAMING_SNAKE_CASE_ : Tuple = re_decoder_block_proj_in.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Tuple = re_prior_cond_conv_out.match(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = regex_match.groups()
SCREAMING_SNAKE_CASE_ : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
SCREAMING_SNAKE_CASE_ : Dict = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
SCREAMING_SNAKE_CASE_ : str = re_prior_cond_conv_out.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re_prior_cond_resnet.match(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = regex_match.groups()
SCREAMING_SNAKE_CASE_ : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
SCREAMING_SNAKE_CASE_ : str = {'1': 1, '3': 2}[groups[-2]]
SCREAMING_SNAKE_CASE_ : Optional[int] = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
SCREAMING_SNAKE_CASE_ : Any = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
SCREAMING_SNAKE_CASE_ : List[Any] = prefix + resnet_block
SCREAMING_SNAKE_CASE_ : Tuple = re_prior_cond_resnet.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : str = re_prior_cond_proj_in.match(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = regex_match.groups()
SCREAMING_SNAKE_CASE_ : Tuple = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re_prior_cond_proj_in.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# keep original key
else:
SCREAMING_SNAKE_CASE_ : List[Any] = original_key
SCREAMING_SNAKE_CASE_ : Optional[int] = replace_key(SCREAMING_SNAKE_CASE )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
SCREAMING_SNAKE_CASE_ : List[str] = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshapes {val.shape} and {value.shape} do not match' )
SCREAMING_SNAKE_CASE_ : int = original_key
SCREAMING_SNAKE_CASE_ : Any = original_key
SCREAMING_SNAKE_CASE_ : Any = value
return new_dict
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
SCREAMING_SNAKE_CASE_ : List[str] = requests.get(f'{PREFIX}{file}' , allow_redirects=SCREAMING_SNAKE_CASE )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=SCREAMING_SNAKE_CASE )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , 'wb' ).write(r.content )
SCREAMING_SNAKE_CASE_ : List[str] = MODEL_MAPPING[model_name.split('/' )[-1]]
SCREAMING_SNAKE_CASE_ : str = JukeboxConfig.from_pretrained(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = JukeboxModel(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : str = {}
for i, dict_name in enumerate(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : List[Any] = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['model']
SCREAMING_SNAKE_CASE_ : List[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
SCREAMING_SNAKE_CASE_ : int = old_dic[k]
elif k.endswith('.w' ):
SCREAMING_SNAKE_CASE_ : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
SCREAMING_SNAKE_CASE_ : List[Any] = old_dic[k]
else:
SCREAMING_SNAKE_CASE_ : int = old_dic[k]
SCREAMING_SNAKE_CASE_ : Tuple = 'vqvae' if i == 0 else f'priors.{3 - i}'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = fix_jukebox_keys(SCREAMING_SNAKE_CASE , model.state_dict() , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
weight_dict.append(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
with open(f'{pytorch_dump_folder_path}/mapping.json' , 'w' ) as txtfile:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
lowerCAmelCase__: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
lowerCAmelCase__: List[Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
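# Example invocation (hedged; the script filename is a placeholder, and the
# prior/vqvae checkpoints listed in MODEL_MAPPING are downloaded on first run):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path ./jukebox-1b-converted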
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # replace padding positions with -100 so they are ignored by the loss
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
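

# A brief note on the -100 label masking used in _map_to_encoder_decoder_inputs
# above: PyTorch's CrossEntropyLoss (applied internally when `labels` are passed
# to the model) ignores targets equal to -100 by default, so padded positions
# contribute nothing to the loss. A minimal sketch of the same mechanism:
#
#     import torch
#     loss_fct = torch.nn.CrossEntropyLoss()  # ignore_index defaults to -100
#     logits = torch.randn(3, 10)             # 3 positions, vocabulary of 10
#     labels = torch.tensor([4, -100, 7])     # middle (padding) position is skipped
#     loss = loss_fct(logits, labels)         # averaged over the 2 real labels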
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset

from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
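

# For orientation, two flag combinations that satisfy the checks above
# (illustrative values only, not tuned recommendations):
#
#     MLM distillation: --mlm --alpha_mlm 2.0 --alpha_clm 0.0 \
#                       --student_type distilbert --teacher_type bert \
#                       --token_counts <counts.pickle>
#     CLM distillation: --alpha_clm 0.5 --alpha_mlm 0.0 \
#                       --student_type gpt2 --teacher_type gpt2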
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
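

# Freezing works by flipping requires_grad on the embedding weight tensors:
# autograd then skips those gradients and the optimizer never updates them.
# One quick way to verify the effect after calling the helpers above (a sketch,
# not part of the original script):
#
#     n_trainable = sum(p.numel() for p in student.parameters() if p.requires_grad)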
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with the `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches."
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for the Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit.",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            " See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - local rank.")
    parser.add_argument("--seed", type=int, default=56, help="Random seed.")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
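
    # An illustrative invocation (paths and hyperparameter values are examples
    # only, not the reference command):
    #
    #   python train.py --student_type distilbert --student_config student.json \
    #       --teacher_type bert --teacher_name bert-base-uncased \
    #       --mlm --alpha_mlm 2.0 --alpha_clm 0.0 \
    #       --data_file data/binarized.pickle --token_counts data/token_counts.pickle \
    #       --dump_path serialization_dir/my_run --force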
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
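
    # The exponent above (count ** -mlm_smoothing) turns raw token counts into
    # masking weights that favor rare tokens. With the default smoothing of 0.7,
    # a token seen 10,000 times gets weight 10000 ** -0.7 ≈ 0.0016, while a token
    # seen 10 times gets 10 ** -0.7 ≈ 0.20, i.e. roughly 125x more likely to be
    # selected per occurrence.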
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()