import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load the original XLM checkpoint on CPU
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
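
# Hypothetical invocation sketch (not part of the original script; the checkpoint path
# below is a placeholder):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./xlm_checkpoint.pth \
#       --pytorch_dump_folder_path ./converted_xlm
#
# This writes the remapped state dict (WEIGHTS_NAME), the filtered params as a JSON
# config (CONFIG_NAME), and the BPE vocabulary with "</w>" word-end markers restored.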
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        # The position ids should be masked with the embedding object's padding index;
        # the first available non-padding position index is padding_idx + 1.
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
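
# Illustrative trace of create_position_ids_from_input_ids (using pad_token_id=1 from
# get_config() above, so padding_idx == 1):
#
#   input_ids    = [[12, 31, 13,  1]]
#   position_ids = [[ 2,  3,  4,  1]]   # sequential ids start at padding_idx + 1;
#                                       # padding positions keep padding_idx itself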
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__snake_case : List[str] = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case,  # the large expected-encoding literal defined above
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape

        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
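
# Hypothetical invocation sketch (dataset path and model name are placeholders):
#
#   python run_ner.py --data_dir ./conll2003 --labels ./conll2003/labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./ner-out \
#       --max_seq_length 128 --do_train --do_eval
#
# All flags above map to the ModelArguments / DataTrainingArguments dataclasses or to
# the stock TrainingArguments parsed by HfArgumentParser.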
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
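
# A minimal sketch of what the lazy structure buys (assumes torch is installed; not
# part of the original file):
#
#   from transformers import GitConfig        # cheap: configuration module only
#   from transformers import GitForCausalLM   # modeling_git (and torch) imported lazily here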
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好，你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
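
# A hypothetical worked example of the shard-name convention count_samples() relies on
# (the filename is illustrative only):
#
#   "train-0007-2048.tfrecord"  --re.search(r"-\d+-(\d+)\.tfrecord")-->  2048 samples
#
# The shards advertise their sample counts in their names, so the pipeline can call
# tf.data.experimental.assert_cardinality() without reading every record first.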
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 0
|
def miller_rabin(n, allow_probable=False):
    """Deterministic Miller-Rabin test for n below 3,317,044,064,679,887,385,961,981.

    A return value of False always means n is composite; above the bound, with
    allow_probable=True, a return value of True indicates a probable prime.
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin():
    """Testing a nontrivial (ends in 1, 3, 7, 9) composite and a prime in each range."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838201)
    assert miller_rabin(838207)
    # 1_373_653
    assert not miller_rabin(17316001)
    assert miller_rabin(17316017)
    # 25_326_001
    assert not miller_rabin(3078386641)
    assert miller_rabin(3078386653)
    # 3_215_031_751
    assert not miller_rabin(1713045574801)
    assert miller_rabin(1713045574819)
    # 2_152_302_898_747
    assert not miller_rabin(2779799728307)
    assert miller_rabin(2779799728327)
    # 3_474_749_660_383
    assert not miller_rabin(113850023909441)
    assert miller_rabin(113850023909527)
    # 341_550_071_728_321
    assert not miller_rabin(1275041018848804351)
    assert miller_rabin(1275041018848804391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79666464458507787791867)
    assert miller_rabin(79666464458507787791951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552840677446647897660333)
    assert miller_rabin(552840677446647897660359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
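
# A small worked example of the d * 2**s decomposition above (illustrative only):
# for n = 561, n - 1 = 560 = 35 * 2**4, so d = 35 and s = 4, and each witness a in
# plist is checked via pow(a, 35 * 2**r, 561) for r in range(4).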
"""simple docstring"""
A_ = 2_56
# Modulus to hash a string
A_ = 1_00_00_03
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
_snake_case : Any = len(snake_case__ )
_snake_case : Tuple = len(snake_case__ )
if p_len > t_len:
return False
_snake_case : str = 0
_snake_case : Tuple = 0
_snake_case : Tuple = 1
# Calculating the hash of pattern and substring of text
for i in range(snake_case__ ):
_snake_case : Optional[int] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_snake_case : int = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_snake_case : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_snake_case : List[str] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = """abc1abc12"""
_snake_case : Tuple = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
_snake_case : Optional[Any] = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(snake_case__ , snake_case__ ) and not rabin_karp(snake_case__ , snake_case__ )
# Test 2)
_snake_case : Tuple = """ABABX"""
_snake_case : Optional[Any] = """ABABZABABYABABX"""
assert rabin_karp(snake_case__ , snake_case__ )
# Test 3)
_snake_case : Union[str, Any] = """AAAB"""
_snake_case : str = """ABAAAAAB"""
assert rabin_karp(snake_case__ , snake_case__ )
# Test 4)
_snake_case : List[str] = """abcdabcy"""
_snake_case : Optional[int] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(snake_case__ , snake_case__ )
# Test 5)
_snake_case : Union[str, Any] = """Lü"""
_snake_case : Optional[int] = """Lüsai"""
assert rabin_karp(snake_case__ , snake_case__ )
_snake_case : Any = """Lue"""
assert not rabin_karp(snake_case__ , snake_case__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 609
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
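
# Added note (not in the original file): with the `_LazyModule` replacement
# above, symbols listed in `_import_structure` are imported only on first
# attribute access, so torch/tf/flax are not pulled in until an MBart class
# from the corresponding backend is actually used.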
| 84
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        """simple docstring"""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        """simple docstring"""
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        """simple docstring"""
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
    def get_dummy_block_records(self):
        """simple docstring"""
        block_records = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
            ],
            dtype=object,
        )
return block_records
    def get_dummy_retriever(self):
        """simple docstring"""
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
return retriever
    def test_retrieve(self):
        """simple docstring"""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
    def test_block_has_answer(self):
        """simple docstring"""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        """simple docstring"""
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 84
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 657
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
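
# Added usage sketch (not in the original file): with no `backbone_config`
# given, the constructor above falls back to a default ResNet backbone.
#     config = UperNetConfig()
#     assert config.backbone_config.model_type == "resnet"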
| 657
| 1
|
"""simple docstring"""
lowercase__ = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowercase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowercase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 63
|
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)
    def test_deduplicate_dataset(self):
        '''simple docstring'''
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(ds_filter)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 63
| 1
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        """simple docstring"""
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
    def test_trainer_tpu(self):
        """simple docstring"""
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 5
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """simple docstring"""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
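
# Added usage sketch (not in the original file; the checkpoint name is only
# illustrative):
#     feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")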
| 540
| 0
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.'
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound
            )
        )

    return is_binary_search_tree_recursive_check(root, -float('inf'), float('inf'))
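
# Added illustration (a minimal sketch, not part of the original module).
def _demo_is_binary_search_tree() -> None:
    assert is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))
    assert not is_binary_search_tree(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0)))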
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')
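
# Added note (assumption, not in the original script): a matching server is
# expected to listen on port 12312 on this host, accept the greeting, and
# stream a file's bytes back until it closes the connection.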
if __name__ == "__main__":
main()
| 335
| 0
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_ddpm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 658
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up GPU memory after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()
def main():
    """simple docstring"""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
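    # Added comment: xmp.spawn forks `nprocs` processes, one per requested TPU
    # core, and invokes the training script's `_mp_fn(index)` in each of them.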
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 716
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        """simple docstring"""
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        """simple docstring"""
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 487
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 4_0_9_6,
"allenai/longformer-large-4096": 4_0_9_6,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_0_9_6,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_0_9_6,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
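
# Added illustration (not in the original file):
#     get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}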
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!'
                    )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
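
# Added usage sketch (assumption: local vocab.json / merges.txt files exist):
#     tokenizer = LongformerTokenizer("vocab.json", "merges.txt")
#     tokenizer.tokenize("Hello world")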
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The nodes number should be same as the number of coins')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
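
# Added illustration (a minimal sketch, not part of the original module): a
# root holding all three coins needs two moves, one to each child.
def _demo_distribute_coins() -> None:
    root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(root) == 2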
if __name__ == "__main__":
import doctest
doctest.testmod()
| 120
| 0
|
"""simple docstring"""
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
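
# Added illustration (a minimal sketch, not part of the original module).
def _demo_catalan() -> None:
    assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]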
if __name__ == "__main__":
import doctest
doctest.testmod()
| 463
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = '''levit'''

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
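# Added usage sketch (illustrative): instantiating the config defined above with
# its defaults; `down_ops` is derived from `key_dim` and `hidden_sizes`.
if __name__ == "__main__":
    config = LevitConfig()
    print(config.model_type, config.image_size)  # levit 224
    print(config.down_ops[0])  # ['Subsample', 16, 8, 4, 2, 2]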
| 463
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Any =MBartTokenizer(__snake_case , keep_accents=__snake_case )
__magic_name__ : int =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__snake_case , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__magic_name__ : Optional[int] =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__magic_name__ : Optional[Any] =tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__magic_name__ : Any =tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def A__ ( self :Tuple ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__magic_name__ : Tuple =(self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__magic_name__ : List[Any] =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__magic_name__ : List[str] =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__magic_name__ : Optional[Any] =tempfile.mkdtemp()
__magic_name__ : Dict =tokenizer_r.save_pretrained(__snake_case )
__magic_name__ : Dict =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__magic_name__ : int =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__magic_name__ : Any =tokenizer_r.from_pretrained(__snake_case )
__magic_name__ : List[Any] =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
__magic_name__ : List[str] =tempfile.mkdtemp()
__magic_name__ : Optional[int] =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__magic_name__ : Dict =tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
__magic_name__ : Any =tokenizer_r.from_pretrained(__snake_case )
__magic_name__ : int =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
__magic_name__ : List[Any] =tempfile.mkdtemp()
__magic_name__ : Dict =tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
__magic_name__ : List[str] =tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__magic_name__ : str =tokenizer_r.from_pretrained(__snake_case )
__magic_name__ : Optional[int] =tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
UpperCamelCase = """facebook/mbart-large-en-ro"""
UpperCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCamelCase = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="""en_XX""", tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
        return cls
def A__ ( self :Any ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Any =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def A__ ( self :List[Any] ):
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
__magic_name__ : Union[str, Any] =[RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__magic_name__ : Optional[int] =self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
__magic_name__ : List[str] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : str =["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __snake_case )
__magic_name__ : Dict =10
__magic_name__ : Optional[Any] =self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[int] =tempfile.mkdtemp()
__magic_name__ : Dict =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
__magic_name__ : Dict =MBartTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Any =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__snake_case , return_tensors="""pt""" )
__magic_name__ : str =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__magic_name__ : Any =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__magic_name__ : int =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple =self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors="""pt""" )
__magic_name__ : Tuple =self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors="""pt""" )
__magic_name__ : List[Any] =targets["""input_ids"""]
__magic_name__ : List[str] =shift_tokens_right(__snake_case , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__snake_case ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 30_34, 2, 25_00_04]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
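# Added usage sketch (illustrative; downloads the checkpoint on first run and
# needs torch for return_tensors="pt"): building seq2seq inputs with the
# tokenizer exercised by the tests above.
if __name__ == "__main__":
    tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    batch = tok(
        "UN Chief Says There Is No Military Solution in Syria",
        text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
        return_tensors="pt",
    )
    print(batch.input_ids.shape, batch.labels.shape)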
| 21
|
'''simple docstring'''
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.602176634e-19,
"britishthermalunit_it": 1055.05585,
"footpound": 1.35_58_18,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
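    # Added usage sketch (illustrative): 1 kilowatt-hour expressed in megajoules.
    print(energy_conversion("kilowatthour", "megajoule", 1))  # 3.6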
| 620
| 0
|
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_KWARGS_DESCRIPTION = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(text):
    """simple docstring"""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    """simple docstring"""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """simple docstring"""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """simple docstring"""
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """simple docstring"""
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase=True, tokenizer="13a", return_str=True):
    """simple docstring"""
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    """simple docstring"""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__="exp" ,__magic_name__=None ,__magic_name__=False ,__magic_name__=False ,__magic_name__=False ,)-> Union[str, Any]:
"""simple docstring"""
snake_case_ : str = len(references[0] )
if any(len(__magic_name__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
snake_case_ : List[Any] = [[refs[i] for refs in references] for i in range(__magic_name__ )]
snake_case_ : List[Any] = sacrebleu.corpus_bleu(
__magic_name__ ,__magic_name__ ,smooth_method=__magic_name__ ,smooth_value=__magic_name__ ,force=__magic_name__ ,lowercase=__magic_name__ ,use_effective_order=__magic_name__ ,)
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, sources, predictions, references):
        '''simple docstring'''
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
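# Added usage sketch (illustrative; mirrors the docstring example above and uses
# `datasets.load_metric`, which fetches the canonical copy of this metric script):
if __name__ == "__main__":
    wiki_split = datasets.load_metric("wiki_split")
    print(wiki_split.compute(sources=["About 95 species are currently accepted ."],
                             predictions=["About 95 you now get in ."],
                             references=[["About 95 species are currently known ."]]))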
| 656
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
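# Added note (illustrative): `_LazyModule` defers the torch-backed imports listed
# in `_import_structure` until an attribute is first accessed, so importing this
# package only for `GPTNeoXJapaneseConfig` or the tokenizer stays cheap.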
| 656
| 1
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """simple docstring"""

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    """simple docstring"""

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """simple docstring"""

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)

    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
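# Added usage sketch (illustrative; full parameter init is compute-heavy on CPU):
if __name__ == "__main__":
    controlnet = FlaxControlNetModel(sample_size=32)
    params = controlnet.init_weights(jax.random.PRNGKey(0))
    print(sorted(params.keys()))  # top-level submodule names, e.g. 'conv_in', 'mid_block', ...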
| 404
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
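# Added usage sketch (illustrative): a +6 dB peaking EQ at 1 kHz for 48 kHz audio,
# applied sample-by-sample via `IIRFilter.process` from audio_filters.iir_filter.
if __name__ == "__main__":
    peak = make_peak(1000, 48000, 6)
    print([round(peak.process(s), 5) for s in (0.0, 1.0, 0.0, -1.0)])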
| 120
| 0
|
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowercase_ ( unittest.TestCase ):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''', )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def _UpperCamelCase ( ) -> List[Any]:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
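# Added usage sketch (illustrative): the same public verbosity API exercised by
# the tests above, as used from application code.
if __name__ == "__main__":
    logging.set_verbosity_info()
    logging.get_logger("transformers").info("now visible at info level")
    logging.set_verbosity_warning()  # back to the default level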
| 719
|
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    '''simple docstring'''
    # (1) remove the word-breaking symbol, (2) add the word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(R"@@$", "", k), v) if k.endswith("@@") else (re.sub(R"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[F'''{k}</w>''']
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(F'''Writing results to {pytorch_dump_folder_path}''')

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(F'''using checkpoint {checkpoint_file}''')
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs)

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, F'''dict.{src_lang}.txt''')
    tgt_dict_file = os.path.join(fsmt_folder_path, F'''dict.{tgt_lang}.txt''')

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''')
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''')
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(R" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(F'''Generating {merges_file}''')
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args["bpe"]}'''
    assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support tokenizer={args["tokenizer"]}'''
    model_conf = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(F'''Generating {fsmt_model_config_file}''')
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(F'''Generating {fsmt_tokenizer_config_file}''')
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(F'''Generating {pytorch_weights_dump_path}''')
    torch.save(model_state_dict, pytorch_weights_dump_path)
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a__ : Optional[int] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
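# Added usage note (illustrative; paths are placeholders):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path /data/wmt19-en-de/model4.pt \
#       --pytorch_dump_folder_path /data/converted/wmt19-en-de
#
# The checkpoint directory must also hold the fairseq dict.*.txt files and the
# bpecodes merges file picked up above.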
| 223
| 0
|
"""simple docstring"""
def solution(n: int = 10) -> str:
    '''simple docstring'''
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }")
| 437
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__(
        self,
        activation_dropout=0.1, activation_function="gelu", vocab_size=30_522, hidden_size=1024,
        encoder_ffn_dim=4096, num_encoder_layers=12, num_encoder_attention_heads=16,
        decoder_ffn_dim=4096, num_decoder_layers=12, num_decoder_attention_heads=16,
        attention_dropout=0.1, dropout=0.1, max_position_embeddings=512, init_std=0.02,
        is_encoder_decoder=True, add_cross_attention=True, decoder_start_token_id=0,
        ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False,
        eps=0.0, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
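# A minimal usage sketch (the layer counts here are illustrative):
#
#     config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
#     print(config.num_hidden_layers)  # 12 -- the encoder and decoder totals combined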
| 437
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
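    # A minimal usage sketch (requires downloading the real checkpoint; the
    # sample sentence is illustrative):
    #
    #     tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
    #     ids = tokenizer("machine learning is great").input_ids
    #     print(tokenizer.convert_ids_to_tokens(ids))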
| 528
|
"""Maximum flow with the Ford-Fulkerson method, using BFS to find augmenting
paths (i.e. the Edmonds-Karp algorithm)."""


def bfs(graph, source, sink, parent):
    # Return True if the sink can still be reached from the source in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and stores the augmenting path.
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the path found by BFS.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update the residual capacities of the edges along the path.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
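# Because bfs() always augments along a shortest path, this is the Edmonds-Karp
# variant of Ford-Fulkerson, which runs in O(V * E^2). For the sample network
# above (the classic CLRS example), the printed maximum flow is 23.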
| 528
| 1
|
"""Abstract base class for CLI subcommands: each command registers its own
argument parser and implements `run`."""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 421
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    r"""Image processor that rescales pixel values and pads images to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
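# A minimal sketch of the pad-to-multiple behavior (toy shape, illustrative):
# a channels-first 3x30x31 image is padded up to the next multiples of
# pad_size=8, i.e. to 3x32x32.
#
#     processor = Swin2SRImageProcessor()
#     image = np.zeros((3, 30, 31), dtype=np.uint8)
#     print(processor(image, return_tensors="np")["pixel_values"].shape)  # (1, 3, 32, 32)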
| 15
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (3, 32, 128)
_lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Dict = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCamelCase : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase))))
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_UpperCamelCase) + """\n""")
_lowerCamelCase : Any = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCamelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
_lowerCamelCase : int = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1))
return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Optional[int] = """test"""
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """test"""
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
_lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 15
| 1
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Each shard should advertise the same length it actually yields.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
def _UpperCamelCase ( self ) -> Any:
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case )
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=_snake_case )
# Expected shouldn't change
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case )
# Check the shards when the dataset is not a round multiple of batch size.
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case )
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case )
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case )
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [[], []]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=_snake_case )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , even_batches=_snake_case )
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=_snake_case )
# Expected shouldn't change
self.check_batch_sampler_shards(_snake_case , _snake_case , even_batches=_snake_case )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=_snake_case )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , even_batches=_snake_case )
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=_snake_case )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , even_batches=_snake_case )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=_snake_case )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , even_batches=_snake_case )
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=_snake_case )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , even_batches=_snake_case )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=_snake_case )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , even_batches=_snake_case )
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=_snake_case )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , even_batches=_snake_case )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=_snake_case )
snake_case_ = [[[0, 1]], []]
self.check_batch_sampler_shards(_snake_case , _snake_case , even_batches=_snake_case )
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=_snake_case )
snake_case_ = [[], []]
self.check_batch_sampler_shards(_snake_case , _snake_case , even_batches=_snake_case )
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case , even_batches=_snake_case )
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=_snake_case )
# Expected shouldn't change
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case , even_batches=_snake_case )
# Check the shards when the dataset is not a round multiple of batch size.
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case , even_batches=_snake_case )
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case , even_batches=_snake_case )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case , even_batches=_snake_case )
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case , even_batches=_snake_case )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [[[0, 1]], []]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case , even_batches=_snake_case )
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=_snake_case )
snake_case_ = [[], []]
self.check_batch_sampler_shards(_snake_case , _snake_case , split_batches=_snake_case , even_batches=_snake_case )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
snake_case_ = [BatchSamplerShard(_snake_case , 2 , _snake_case , even_batches=_snake_case ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        # If drop_last is False, the lists may only differ by duplicated padding items.
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
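# A minimal sketch of what BatchSamplerShard does (values illustrative): with
# 2 processes, each shard yields every other batch of the wrapped sampler.
#
#     sampler = BatchSampler(range(12), batch_size=3, drop_last=False)
#     shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
#     list(shards[0])  # [[0, 1, 2], [6, 7, 8]]
#     list(shards[1])  # [[3, 4, 5], [9, 10, 11]]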
| 198
|
"""simple docstring"""
class __A :
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : int ,_snake_case : str ,_snake_case : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ : Tuple = None
lowercase__ : str = None
lowercase__ : Dict = graph
self._normalize_graph(_snake_case ,_snake_case )
lowercase__ : Any = len(_snake_case )
lowercase__ : Any = None
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[str] ,_snake_case : List[str] ) -> List[str]:
"""simple docstring"""
if sources is int:
lowercase__ : Optional[int] = [sources]
if sinks is int:
lowercase__ : str = [sinks]
if len(_snake_case ) == 0 or len(_snake_case ) == 0:
return
lowercase__ : str = sources[0]
lowercase__ : Optional[int] = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_snake_case ) > 1 or len(_snake_case ) > 1:
lowercase__ : Tuple = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase__ : Dict = len(self.graph ) + 1
for room in self.graph:
room.insert(0 ,0 )
self.graph.insert(0 ,[0] * size )
for i in sources:
lowercase__ : Optional[Any] = max_input_flow
lowercase__ : Dict = 0
lowercase__ : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase__ : List[str] = max_input_flow
lowercase__ : int = size - 1
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCAmelCase ( self : str ,_snake_case : List[Any] ) -> int:
"""simple docstring"""
lowercase__ : Tuple = algorithm(self )
class __A :
'''simple docstring'''
def __init__( self : int ,_snake_case : Tuple ) -> int:
"""simple docstring"""
lowercase__ : int = flow_network
lowercase__ : int = flow_network.verticesCount
lowercase__ : Tuple = flow_network.sourceIndex
lowercase__ : str = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
lowercase__ : Optional[Any] = flow_network.graph
lowercase__ : Optional[int] = False
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase__ : Tuple = True
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
class __A ( A_ ):
'''simple docstring'''
def __init__( self : int ,_snake_case : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(_snake_case )
# use this to save your result
lowercase__ : Union[str, Any] = -1
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : int = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase__ : List[str] = [0] * self.verticies_count
lowercase__ : Tuple = [0] * self.verticies_count
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowercase__ : str = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase__ : Union[str, Any] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase__ : Tuple = 0
while i < len(_snake_case ):
lowercase__ : Dict = vertices_list[i]
lowercase__ : Optional[Any] = self.heights[vertex_index]
self.process_vertex(_snake_case )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 ,vertices_list.pop(_snake_case ) )
lowercase__ : Optional[int] = 0
else:
i += 1
lowercase__ : Dict = sum(self.preflow[self.source_index] )
def UpperCAmelCase ( self : Any ,_snake_case : int ) -> List[Any]:
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_snake_case ,_snake_case )
self.relabel(_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : int ,_snake_case : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = min(
self.excesses[from_index] ,self.graph[from_index][to_index] - self.preflow[from_index][to_index] ,)
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ : int = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase__ : Optional[int] = self.heights[to_index]
if min_height is not None:
lowercase__ : Optional[int] = min_height + 1
if __name__ == "__main__":
lowerCAmelCase_ = [0]
lowerCAmelCase_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowerCAmelCase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowerCAmelCase_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowerCAmelCase_ = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
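# For the 4-node sample graph above, the only augmenting route is
# 0 -> 1 -> 2 -> 3 with bottleneck min(7, 6, 8) = 6, so the printed maximum
# flow is 6.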
| 560
| 0
|
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    """Calculate turn-around times using the Highest Response Ratio Next policy."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # If nothing has arrived yet, jump ahead to the arrival time of the
        # first unfinished process.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Waiting time is turn-around time minus burst time for each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
)
print(f"average waiting time : {mean(waiting_time):.5f}")
print(f"average turn around time : {mean(turn_around_time):.5f}")
| 277
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
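# A minimal sketch of how this helper is usually driven from a model's test
# case (BertConfig is just an illustrative choice):
#
#     def setUp(self):
#         self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#     def test_config(self):
#         self.config_tester.run_common_tests()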
| 277
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train/validation `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
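
# Editorial note: the manual `samples_seen` bookkeeping in the eval loop above is
# exactly what `Accelerator.gather_for_metrics` automates. A minimal sketch of the
# same evaluation written with it (assuming the accelerator, model, metric and
# eval_dataloader objects built inside `training_function`):
def evaluation_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics drops the samples the distributed sampler duplicated
        # to pad the last batch, so no manual truncation is needed.
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()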
| 46
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
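
# Editorial sketch: what the masked-LM collator chosen above produces on a toy batch.
# Runnable on its own (downloads the bert-base-cased tokenizer; the masked positions
# are sampled randomly, so the exact output varies between runs):
def demo_mlm_collator():
    toy_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    collator = DataCollatorForLanguageModeling(tokenizer=toy_tokenizer, mlm=True, mlm_probability=0.15)
    encodings = [toy_tokenizer("hello world"), toy_tokenizer("masked language modeling")]
    batch = collator(encodings)
    # `labels` is -100 everywhere except at the ~15% of positions that were masked,
    # where it holds the original token ids the model must recover.
    print(batch["input_ids"].shape, batch["labels"].shape)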
| 401
| 0
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 710
|
def binary_or(a: int, b: int) -> str:
    """
    Take in 2 integers, convert them to binary and return a binary string
    that is the result of a bitwise OR operation on the two.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(0, 1)
    '0b1'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
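
# Editorial sanity check (not in the original): for non-negative integers the manual
# zfill/zip construction above agrees with Python's built-in bitwise OR.
assert binary_or(25, 32) == bin(25 | 32)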
| 381
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 29
|
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(features):
        return sigmoid_function(
            np.dot(features, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
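
# Editorial cross-check (not part of the original script): scikit-learn's
# LogisticRegression fitted on the same two features should draw a similar decision
# boundary; exact coefficients differ because sklearn adds an intercept term and
# L2 regularization by default.
def sklearn_crosscheck(features, targets):
    from sklearn.linear_model import LogisticRegression

    clf = LogisticRegression().fit(features, targets)
    print("sklearn coefficients:", clf.coef_, "intercept:", clf.intercept_)
    return clf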
| 571
| 0
|
'''simple docstring'''
def bfs(graph, s, t, parent):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and to store path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
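
# Editorial variant (not in the original file): `queue.pop(0)` on a Python list is
# O(n); the idiomatic BFS queue is collections.deque, whose popleft() is O(1).
def bfs_with_deque(graph, s, t, parent):
    from collections import deque

    visited = [False] * len(graph)
    queue = deque([s])
    visited[s] = True
    while queue:
        u = queue.popleft()  # O(1), unlike list.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]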
| 667
|
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 667
| 1
|
'''simple docstring'''
def mf_knapsack(i, wt, val, j):
    """
    Memory-function (memoized) knapsack: only the needed subproblems are solved.
    `f` is a global 2D table pre-filled with -1s.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 292
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 335
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
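
# Minimal usage sketch (editorial addition): `attribute_map` above lets generic
# config names resolve to the decoder-specific fields, e.g. `hidden_size` reads
# `d_model` and `num_hidden_layers` reads `decoder_layers`.
if __name__ == "__main__":
    config = TrOCRConfig()
    assert config.hidden_size == config.d_model == 1024
    assert config.num_hidden_layers == config.decoder_layers == 12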
| 718
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 29
| 0
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between the classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        # `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
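
# Illustrative usage (editorial addition, not part of the original module): project a
# random 3-feature dataset, laid out with samples as columns as the functions above
# expect. Random data keeps the within-class covariance well-conditioned for eigh.
def demo_dimensionality_reduction() -> None:
    rng = np.random.default_rng(0)
    features = rng.normal(size=(3, 10))  # 3 features, 10 samples (columns)
    labels = np.array([0] * 5 + [1] * 5)
    print("PCA projection to 2D:\n", principal_component_analysis(features, 2))
    print("LDA projection to 1D:\n", linear_discriminant_analysis(features, labels, 2, 1))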
| 85
|
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
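
# Editorial note: the two checks above rely on Fermat's little theorem -- for prime p
# and gcd(b, p) == 1, b**(p - 2) is the modular inverse of b. Python's built-in
# three-argument pow performs the same binary exponentiation natively:
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)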
| 141
| 0
|
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of products corresponding to the distinct prime partitions
    of number_to_partition.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5_000) -> int | None:
    """
    Return the smallest integer that can be written as the sum of primes in more
    than number_unique_partitions ways.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F'{solution() = }')
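
# Editorial sanity check: 7 = 7 = 2 + 5 = 2 + 2 + 3 are the three prime partitions
# of 7, which partition() encodes as the products {7, 10, 12}.
assert len(partition(7)) == 3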
| 702
|
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """simple docstring"""
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    '''simple docstring'''
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})
        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})
    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})
    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})
    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})
        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})
    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})
    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
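# A minimal usage sketch (not part of the test file): `evaluate` interprets a
# code string against a dict of allowed tools and mutates `state` in place.
if __name__ == "__main__":
    demo_state = {"x": 4}
    demo_result = evaluate("y = add_two(x)", {"add_two": add_two}, state=demo_state)
    print(demo_result, demo_state)  # 6 {'x': 4, 'y': 6}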
| 229
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    '''simple docstring'''
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
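# Quick sanity check for `ucal` (a sketch, not part of the original script):
# u * (u - 1) * (u - 2) evaluated at u = 2.0 collapses to zero.
assert ucal(2.0, 3) == 0.0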
def main() -> None:
    '''simple docstring'''
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 83
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True,
                 keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>",
                 pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
                 additional_special_tokens=["<eop>", "<eod>"], **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
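# Usage sketch (assumes network access to the Hugging Face Hub; not part of
# the original module):
#
#   tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   ids = tokenizer.build_inputs_with_special_tokens([17, 21])
#   # XLNet puts the special tokens at the end: [17, 21, sep_id, cls_id]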
| 587
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1_024):
    """simple docstring"""
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
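# A toy check (a sketch, not in the original script): a stand-in "tokenizer"
# that charges one input id per whitespace-separated word shows the greedy
# packing behaviour without downloading a real model.
#
#   from types import SimpleNamespace
#   import torch
#
#   def word_counter(text, return_tensors="pt"):
#       return SimpleNamespace(input_ids=torch.zeros((1, len(text.split()))))
#
#   pack_examples(word_counter, ["a b", "c", "d e f"], ["x", "y", "z"], max_tokens=4)
#   # -> (["a b c", "d e f"], ["x y", "z"])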
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    """simple docstring"""
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 458
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """simple docstring"""
    # load the pre-trained checkpoint and rebuild the original fairseq model
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
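# Example invocation (a sketch; the script name and file paths below are
# placeholders, not artifacts shipped with this snippet):
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted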
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 458
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    model_class = PriorTransformer
    main_input_name = "hidden_states"
    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self):
        return (4, 8)
    @property
    def output_shape(self):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)
        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()
        input_dict = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input_dict)[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ])
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        sample_input = self.get_dummy_seed_input(seed=seed)
        with torch.no_grad():
            sample = model(**sample_input)[0]
        assert list(sample.shape) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
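# Shape sketch (illustrative only, not part of the test file): with the tiny
# config used in the fast tests above, the prior maps a (4, 8) hidden state,
# a timestep, and the conditioning embeddings to a (4, 8) prediction:
#
#   model = PriorTransformer(num_attention_heads=2, attention_head_dim=4, num_layers=2,
#                            embedding_dim=8, num_embeddings=7, additional_embeddings=4)
#   out = model(**dummy_inputs)[0]   # shape (4, 8)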
| 226
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
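# A minimal subclass sketch (illustrative; the command below is made up, not
# one of the real transformers CLI commands):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           sub = parser.add_parser("hello")
#           sub.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")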
| 553
| 0
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """simple docstring"""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function.")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """simple docstring"""
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    '''simple docstring'''
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 215
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term, power):
    """simple docstring"""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
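# Quick check (a sketch, not in the original script): the first three terms
# of the P-series for p = 2.
assert p_series(3, 2) == ["1", "1 / 4", "1 / 9"]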
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 215
| 1
|
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """simple docstring"""
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 470
|
"""simple docstring"""
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """simple docstring"""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
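# Known result (a sketch, not in the original script): among denominators up
# to 10, 1/7 has the longest recurring decimal cycle (six digits).
#
#   solution(1, 10)  # -> 7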
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 470
| 1
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n: int = 1000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
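# Quick check (a sketch, not in the original script): the first Fibonacci
# number with three digits (144) is the 12th term, so solution(3) == 12.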
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 506
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 506
| 1
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 137
|
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
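# Quick check (a sketch, not in the original): 0b1011 has three set bits.
assert get_set_bits_count(0b1011) == 3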
if __name__ == "__main__":
import doctest
doctest.testmod()
| 137
| 1
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn
        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 400
|
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no):
        # if mask == self.finalmask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
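# Worked example (a sketch, not in the original): with the demo data below,
# person 0 can do tasks {1, 3, 4}, person 1 tasks {1, 2, 5} and person 2
# tasks {3, 4}; `count_no_of_ways` counts the assignments in which every
# person receives exactly one distinct task.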
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 400
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    '''simple docstring'''
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)
    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
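# Usage sketch (assumes the Donut checkpoint above can be downloaded; the
# image path is a placeholder, not a bundled file):
#
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(Image.open("invoice.png"), "What is the total amount?")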
| 15
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    '''simple docstring'''
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.')
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }), codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"], reference_urls=[
                "https://github.com/m-popovic/chrF",
            ], )
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False, ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 15
| 1
|
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
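# Quick check (a sketch, not in the original): the first 4x4 test grid holds
# eight negative numbers, and all three strategies agree on it.
assert (
    count_negatives_binary_search(test_grids[0])
    == count_negatives_brute_force(test_grids[0])
    == count_negatives_brute_force_with_break(test_grids[0])
    == 8
)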
def benchmark() -> None:
    """simple docstring"""
    from timeit import timeit
    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 12
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[Any] = """t5"""
_SCREAMING_SNAKE_CASE :List[str] = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE :Any = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , _a=32_128 , _a=512 , _a=64 , _a=2_048 , _a=6 , _a=None , _a=8 , _a=32 , _a=128 , _a=0.1 , _a=1E-6 , _a=1.0 , _a="relu" , _a=True , _a=True , _a=0 , _a=1 , **_a , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : int = d_kv
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_ff
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_layers
SCREAMING_SNAKE_CASE__ : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
SCREAMING_SNAKE_CASE__ : Tuple = num_heads
SCREAMING_SNAKE_CASE__ : Dict = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ : str = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout_rate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_factor
SCREAMING_SNAKE_CASE__ : Tuple = feed_forward_proj
SCREAMING_SNAKE_CASE__ : str = use_cache
SCREAMING_SNAKE_CASE__ : List[str] = self.feed_forward_proj.split("""-""" )
SCREAMING_SNAKE_CASE__ : Dict = act_info[-1]
SCREAMING_SNAKE_CASE__ : str = act_info[0] == """gated"""
if len(_a ) > 1 and act_info[0] != "gated" or len(_a ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
SCREAMING_SNAKE_CASE__ : List[Any] = """gelu_new"""
super().__init__(
pad_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , **_a , )
class __a (UpperCamelCase_):
'''simple docstring'''
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
SCREAMING_SNAKE_CASE__ : Tuple = """past_encoder_sequence + sequence"""
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: """batch"""}
SCREAMING_SNAKE_CASE__ : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE__ : str = {0: """batch""", 1: """decoder_sequence"""}
SCREAMING_SNAKE_CASE__ : Dict = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_a , direction="""inputs""" )
return common_inputs
@property
def _a ( self ) -> int:
"""simple docstring"""
return 13
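# Usage sketch (illustrative): instantiate a small T5 configuration and read
# back a remapped attribute; `hidden_size` resolves to `d_model` through
# `attribute_map`, and "gated-gelu" is rewritten to "gelu_new".
#
#     >>> config = T5Config(d_model=256, num_layers=4, feed_forward_proj="gated-gelu")
#     >>> config.hidden_size, config.num_hidden_layers, config.dense_act_fn
#     (256, 4, 'gelu_new')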
| 12
| 1
|
"""Translation-related feature types for the `datasets` library."""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """A fixed-language translation feature: one string per language."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """A translation feature that allows a variable subset of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) '
                f'are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
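# Usage sketch (illustrative): encoding an example with multiple translations
# for one language flattens them into parallel `language`/`translation` lists,
# sorted by (language, text).
#
#     >>> feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#     >>> feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#     {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}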
| 90
|
# Hand-tuned denoising timestep schedules (the fast/smart/super schedules used
# by DeepFloyd-IF-style pipelines in `diffusers`); each list runs from the
# noisiest timestep (999) down to 0.
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]

smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]

smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]

smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]

smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]

super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]

super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]

super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
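# Usage sketch (illustrative): DeepFloyd-IF-style pipelines accept one of
# these schedules through their `timesteps` argument instead of
# `num_inference_steps`, e.g.
#
#     >>> # image = pipe(prompt, timesteps=smart100_timesteps).images[0]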
| 537
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)

        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 379
|
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: return the maximum value that fits in the remaining
    capacity `max_weight`, considering items from `index` onwards.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it fits.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
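# Usage sketch (illustrative): with weights [1, 2, 4, 5], values [5, 4, 8, 6]
# and capacity 5, the best choice is the first and third items
# (weight 1 + 4 = 5, value 5 + 8 = 13).
#
#     >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
#     13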
| 379
| 1
|
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
_UpperCAmelCase = 3_0_0 # TEMPERATURE (unit = K)
def __magic_name__ ( lowercase , lowercase , lowercase , ):
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
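# Usage sketch (illustrative): for a junction with N_D = N_A = 1e17 cm^-3 and
# n_i = 1e10 cm^-3 at 300 K, V_bi = (kT/q) * ln(N_D * N_A / n_i^2) ≈ 0.83 V.
#
#     >>> round(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10), 2)
#     0.83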
| 409
|
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase = 1_0
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
for i in range(lowercase , lowercase ):
if array[i] == target:
return i
return -1
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =0
SCREAMING_SNAKE_CASE_: List[Any] =len(lowercase )
while left <= right:
if right - left < precision:
return lin_search(lowercase , lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =(left + right) // 3 + 1
SCREAMING_SNAKE_CASE_: str =2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =one_third - 1
elif array[two_third] < target:
SCREAMING_SNAKE_CASE_: Optional[int] =two_third + 1
else:
SCREAMING_SNAKE_CASE_: List[str] =one_third + 1
SCREAMING_SNAKE_CASE_: Optional[Any] =two_third - 1
else:
return -1
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
if left < right:
if right - left < precision:
return lin_search(lowercase , lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =(left + right) // 3 + 1
SCREAMING_SNAKE_CASE_: List[str] =2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowercase , one_third - 1 , lowercase , lowercase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowercase , lowercase , lowercase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowercase , lowercase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = input("""Enter numbers separated by comma:\n""").strip()
_UpperCAmelCase = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_UpperCAmelCase = int(input("""Enter the number to be found in the list:\n""").strip())
_UpperCAmelCase = ite_ternary_search(collection, target)
_UpperCAmelCase = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print("""Not found""")
| 409
| 1
|
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbour 2x upsampling followed by a 3x3 convolution.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the time embedding and broadcast it over the spatial dims.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
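# Usage sketch (illustrative shapes): initialize and apply the resnet block
# above on an NHWC feature map with a (batch, time_embed_dim) embedding.
#
#     >>> import jax
#     >>> block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#     >>> x = jnp.zeros((1, 8, 8, 32))   # NHWC feature map
#     >>> t = jnp.zeros((1, 128))        # time embedding
#     >>> params = block.init(jax.random.PRNGKey(0), x, t)
#     >>> y = block.apply(params, x, t)  # shape (1, 8, 8, 64)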
| 707
|
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #              ^ unk: 2 + 1 = 3          unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459,
            124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44,
            378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153,
            450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        # `A` above is the verbatim expected encoding for xlm-roberta-base.
        self.tokenizer_integration_test_util(
            expected_encoding=A,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
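# Usage sketch (illustrative paths/flags): the fast tests above run directly
# under pytest, while the @slow ones additionally require RUN_SLOW=1, e.g.
#
#     RUN_SLOW=1 pytest tests/models/xlm_roberta/test_tokenization_xlm_roberta.py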
| 661
| 0
|
"""Convert a WavLM checkpoint from the original unilm repository."""

import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None

                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
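# Usage sketch (illustrative paths; the script and checkpoint file names are
# assumptions):
#
#     python convert_wavlm_checkpoint.py \
#         --checkpoint_path /path/to/WavLM-Base.pt \
#         --pytorch_dump_folder_path ./wavlm-base-converted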
| 477
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any]=13 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Optional[Any]=16 , lowerCAmelCase : Union[str, Any]=[32, 64, 128] , lowerCAmelCase : Tuple=[1, 2, 1] , lowerCAmelCase : Dict=[2, 2, 4] , lowerCAmelCase : Dict=2 , lowerCAmelCase : Any=2.0 , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : str=0.0 , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : Optional[Any]="gelu" , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : Any=True , lowerCAmelCase : Dict=None , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : str=8 , lowerCAmelCase : int=["stage1", "stage2"] , lowerCAmelCase : List[str]=[1, 2] , ) -> List[str]:
"""simple docstring"""
_snake_case : List[Any] = parent
_snake_case : List[Any] = batch_size
_snake_case : Dict = image_size
_snake_case : Tuple = patch_size
_snake_case : Union[str, Any] = num_channels
_snake_case : Dict = embed_dim
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : int = depths
_snake_case : Tuple = num_heads
_snake_case : Any = window_size
_snake_case : int = mlp_ratio
_snake_case : Union[str, Any] = qkv_bias
_snake_case : Optional[Any] = hidden_dropout_prob
_snake_case : Any = attention_probs_dropout_prob
_snake_case : List[str] = drop_path_rate
_snake_case : Union[str, Any] = hidden_act
_snake_case : Any = use_absolute_embeddings
_snake_case : Dict = patch_norm
_snake_case : List[Any] = layer_norm_eps
_snake_case : Optional[int] = initializer_range
_snake_case : List[Any] = is_training
_snake_case : Dict = scope
_snake_case : Any = use_labels
_snake_case : int = type_sequence_label_size
_snake_case : int = encoder_stride
_snake_case : Optional[Any] = out_features
_snake_case : Any = out_indices
def UpperCamelCase_ ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_snake_case : Dict = None
if self.use_labels:
_snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_snake_case : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Optional[Any]) -> Any:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[int] = FocalNetModel(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : Optional[Any] = model(lowerCAmelCase)
_snake_case : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_snake_case : Any = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def UpperCamelCase_ ( self : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple) -> Any:
"""simple docstring"""
_snake_case : Any = FocalNetBackbone(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : int = model(lowerCAmelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : str = FocalNetBackbone(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : List[str] = model(lowerCAmelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
_snake_case : Optional[int] = FocalNetForMaskedImageModeling(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : List[str] = model(lowerCAmelCase)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_snake_case : Dict = 1
_snake_case : Union[str, Any] = FocalNetForMaskedImageModeling(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_snake_case : Optional[int] = model(lowerCAmelCase)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def UpperCamelCase_ ( self : int , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple) -> Tuple:
"""simple docstring"""
_snake_case : List[str] = self.type_sequence_label_size
_snake_case : List[str] = FocalNetForImageClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : Optional[int] = model(lowerCAmelCase , labels=lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_snake_case : List[str] = 1
_snake_case : str = FocalNetForImageClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_snake_case : int = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase_ ( self : Tuple) -> Dict:
"""simple docstring"""
_snake_case : int = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Optional[int] = config_and_inputs
_snake_case : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : int = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case_ : Optional[int] = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case_ : List[Any] = False
snake_case_ : List[str] = False
snake_case_ : Dict = False
snake_case_ : str = False
snake_case_ : Optional[Any] = False
def UpperCamelCase_ ( self : List[str]) -> Any:
"""simple docstring"""
_snake_case : List[str] = FocalNetModelTester(self)
_snake_case : List[str] = ConfigTester(self , config_class=lowerCAmelCase , embed_dim=37 , has_text_modality=lowerCAmelCase)
def UpperCamelCase_ ( self : Optional[Any]) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return
def UpperCamelCase_ ( self : Optional[int]) -> Dict:
"""simple docstring"""
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCamelCase_ ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase)
def UpperCamelCase_ ( self : str) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase)
def UpperCamelCase_ ( self : Tuple) -> int:
"""simple docstring"""
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase)
@unittest.skip(reason="""FocalNet does not use inputs_embeds""")
def UpperCamelCase_ ( self : List[Any]) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""")
def UpperCamelCase_ ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase_ ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_snake_case : List[str] = model_class(lowerCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_snake_case : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear))
def UpperCamelCase_ ( self : Optional[Any]) -> int:
"""simple docstring"""
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_snake_case : Optional[int] = model_class(lowerCAmelCase)
_snake_case : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Dict = [*signature.parameters.keys()]
_snake_case : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase)
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : str) -> List[Any]:
"""simple docstring"""
_snake_case : str = model_class(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
with torch.no_grad():
_snake_case : int = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase))
_snake_case : List[Any] = outputs.hidden_states
_snake_case : Optional[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths) + 1)
self.assertEqual(len(lowerCAmelCase) , lowerCAmelCase)
# FocalNet has a different seq_length
_snake_case : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_snake_case : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
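# For illustration with hypothetical sizes: a 32x32 input and 4x4 patches give (32 // 4) * (32 // 4) = 64 patches.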
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
_snake_case : int = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase) , lowerCAmelCase)
_snake_case , _snake_case , _snake_case , _snake_case : str = reshaped_hidden_states[0].shape
_snake_case : Any = (
reshaped_hidden_states[0].view(lowerCAmelCase , lowerCAmelCase , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase_ ( self : Dict) -> List[str]:
"""simple docstring"""
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_snake_case : int = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Optional[Any] = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
def UpperCamelCase_ ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = 3
_snake_case : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_snake_case : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_snake_case : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_snake_case : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
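# For illustration with hypothetical sizes: image_size 13 and patch_size 4 pad to 13 + 4 - (13 % 4) = 16.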
for model_class in self.all_model_classes[:-1]:
_snake_case : Union[str, Any] = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[Any] = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , (padded_height, padded_width))
@slow
def UpperCamelCase_ ( self : Any) -> Tuple:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Union[str, Any] = FocalNetModel.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
def UpperCamelCase_ ( self : Dict) -> List[str]:
"""simple docstring"""
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Optional[int] = _config_zero_init(lowerCAmelCase)
for model_class in self.all_model_classes:
_snake_case : str = model_class(config=lowerCAmelCase)
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : Any) -> Any:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""") if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_snake_case : Any = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""").to(lowerCAmelCase)
_snake_case : Any = self.default_image_processor
_snake_case : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
_snake_case : Optional[int] = image_processor(images=lowerCAmelCase , return_tensors="""pt""").to(lowerCAmelCase)
# forward pass
with torch.no_grad():
_snake_case : List[str] = model(**lowerCAmelCase)
# verify the logits
_snake_case : Optional[int] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase)
_snake_case : str = torch.tensor([0.2_166, -0.4_368, 0.2_191]).to(lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4))
self.assertEqual(outputs.logits.argmax(dim=-1).item() , 281)
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : List[str] = (FocalNetBackbone,) if is_torch_available() else ()
snake_case_ : Any = FocalNetConfig
snake_case_ : Optional[Any] = False
def UpperCamelCase_ ( self : int) -> Dict:
"""simple docstring"""
_snake_case : Optional[Any] = FocalNetModelTester(self)
| 477
| 1
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , ):
__A : str = size if size is not None else {'height': 18, 'width': 18}
__A : int = parent
__A : Optional[int] = batch_size
__A : int = num_channels
__A : Dict = image_size
__A : List[str] = min_resolution
__A : List[str] = max_resolution
__A : List[Any] = do_resize
__A : List[Any] = size
__A : Dict = do_normalize
__A : Optional[Any] = image_mean
__A : Dict = image_std
def UpperCAmelCase_ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = DPTImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
__A : List[str] = DPTImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
__A : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowercase_ , 'image_std' ) )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowercase_ , 'size' ) )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
__A : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A : Dict = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A : List[Any] = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
__A : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A : int = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 715
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase : Optional[int] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
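# Standard lazy-import pattern: at runtime the module is backed by a _LazyModule, so the torch-dependent imports above only happen on first attribute access.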
| 77
| 0
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__UpperCAmelCase = '''src/transformers'''
__UpperCAmelCase = '''docs/source/en/tasks'''
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any ) -> Optional[int]:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase : Optional[Any] = f.readlines()
# Find the start prompt.
UpperCamelCase : List[Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
UpperCamelCase : Optional[Any] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
__UpperCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__UpperCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide]
UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
UpperCamelCase : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def UpperCamelCase ( snake_case__ : str , snake_case__ : Optional[int]=False ) -> Tuple:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
UpperCamelCase : Optional[Any] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return (abs(source - target ) / target) < 0.01
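# For illustration: is_apercent_close(1001, 1000) is True (0.1% apart), while is_apercent_close(1020, 1000) is False (2% apart).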
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
assert result == expected
| 29
| 0
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[str] = XLMProphetNetTokenizer
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Optional[int] = True
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase :str = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :Union[str, Any] = '[PAD]'
_lowerCAmelCase :List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '[PAD]' )
self.assertEqual(vocab_keys[1] , '[CLS]' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_UpperCAmelCase ) , 1012 )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def SCREAMING_SNAKE_CASE__ ( self: int ):
_lowerCAmelCase :Optional[Any] = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase :int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCAmelCase :List[str] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
_lowerCAmelCase :str = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def SCREAMING_SNAKE_CASE__ ( self: str ):
_lowerCAmelCase :Any = 'Hello World!'
_lowerCAmelCase :Tuple = [3_5389, 6672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self: Any ):
# fmt: off
_lowerCAmelCase :Optional[Any] = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 720
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[Any] = 'ZinengTang/tvlt-base'
_lowerCAmelCase :Tuple = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , **_UpperCAmelCase: List[str] ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , **_UpperCAmelCase: int ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Optional[Any] = self.get_image_processor()
_lowerCAmelCase :List[str] = self.get_feature_extractor()
_lowerCAmelCase :List[Any] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :Dict = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _UpperCAmelCase )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :List[str] = self.get_image_processor()
_lowerCAmelCase :Tuple = self.get_feature_extractor()
_lowerCAmelCase :List[str] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
_lowerCAmelCase :str = np.ones([1_2000] )
_lowerCAmelCase :Union[str, Any] = feature_extractor(_UpperCAmelCase , return_tensors='np' )
_lowerCAmelCase :Optional[int] = processor(audio=_UpperCAmelCase , return_tensors='np' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = self.get_image_processor()
_lowerCAmelCase :str = self.get_feature_extractor()
_lowerCAmelCase :Any = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = np.ones([3, 224, 224] )
_lowerCAmelCase :Optional[int] = image_processor(_UpperCAmelCase , return_tensors='np' )
_lowerCAmelCase :List[Any] = processor(images=_UpperCAmelCase , return_tensors='np' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = self.get_image_processor()
_lowerCAmelCase :Optional[Any] = self.get_feature_extractor()
_lowerCAmelCase :Optional[int] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
_lowerCAmelCase :List[str] = np.ones([1_2000] )
_lowerCAmelCase :List[str] = np.ones([3, 224, 224] )
_lowerCAmelCase :Union[str, Any] = processor(audio=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Tuple = self.get_image_processor()
_lowerCAmelCase :Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase :Union[str, Any] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
| 382
| 0
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Optional[int] = BlenderbotSmallTokenizer
lowerCamelCase_ : Dict = False
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : Optional[int] = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
snake_case_ : str = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
snake_case_ : Tuple = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
snake_case_ : List[str] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
snake_case_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def lowerCamelCase (self , **__magic_name__ ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = '''adapt act apte'''
snake_case_ : int = '''adapt act apte'''
return input_text, output_text
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : List[Any] = '''adapt act apte'''
snake_case_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
snake_case_ : Any = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
snake_case_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
snake_case_ : str = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1384]
snake_case_ : int = '''I am a small frog.'''
snake_case_ : Dict = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
snake_case_ : str = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : str = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
snake_case_ : Tuple = '''I am a small frog .'''
snake_case_ : Optional[Any] = '''.'''
snake_case_ : int = tok(__magic_name__ )['''input_ids''']
snake_case_ : Any = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 60
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : Optional[int] = MgpstrTokenizer
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : Tuple = {}
UpperCamelCase_ : List[str] = False
def _SCREAMING_SNAKE_CASE ( self : Dict ):
super().setUp()
# fmt: off
lowerCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase__ = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case__ ) + """\n""" )
def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Tuple ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Dict ):
lowerCAmelCase__ = """tester"""
lowerCAmelCase__ = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.get_tokenizers(do_lower_case=snake_case__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
lowerCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=snake_case__ )
self.assertEqual(len(snake_case__ ) , 1 )
lowerCAmelCase__ = tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
self.assertTrue(special_token not in decoded )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ , lowerCAmelCase__ = self.get_input_output_texts(snake_case__ )
lowerCAmelCase__ = tokenizer.tokenize(snake_case__ )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(snake_case__ )
lowerCAmelCase__ = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertNotEqual(len(snake_case__ ) , 0 )
lowerCAmelCase__ = tokenizer.decode(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(text_a.replace(""" """ , """""" ) , snake_case__ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
| 644
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case ( __lowerCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="None" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
'''simple docstring'''
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = relative_attention
lowerCAmelCase = position_biased_input
lowerCAmelCase = pos_att_type
lowerCAmelCase = scope
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
lowerCAmelCase = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
lowerCAmelCase = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.num_labels
lowerCAmelCase = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.num_labels
lowerCAmelCase = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.prepare_config_and_inputs()
( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE : int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : str = False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = DebertaVaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
lowerCAmelCase = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
lowerCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
lowerCAmelCase = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , F'{output[:, 1:4, 1:4]}' )
| 717
|
'''simple docstring'''
def snake_case ( snake_case : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = 0
lowerCAmelCase = len(snake_case )
for i in range(n - 1 ):
for j in range(i + 1 , snake_case ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def snake_case ( snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if len(snake_case ) <= 1:
return arr, 0
lowerCAmelCase = len(snake_case ) // 2
lowerCAmelCase = arr[0:mid]
lowerCAmelCase = arr[mid:]
lowerCAmelCase , lowerCAmelCase = count_inversions_recursive(snake_case )
lowerCAmelCase , lowerCAmelCase = count_inversions_recursive(snake_case )
lowerCAmelCase , lowerCAmelCase = _count_cross_inversions(snake_case , snake_case )
lowerCAmelCase = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def snake_case ( snake_case : Union[str, Any] , snake_case : int ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = lowerCAmelCase = lowerCAmelCase = 0
while i < len(snake_case ) and j < len(snake_case ):
if p[i] > q[j]:
# if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(snake_case ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(snake_case ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
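# A small worked example (not part of the original tests): merging the sorted halves
# p = [2, 10] and q = [1, 5] counts the cross pairs (2, 1), (10, 1) and (10, 5), so
# _count_cross_inversions([2, 10], [1, 5]) returns ([1, 2, 5, 10], 3).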
def snake_case ( ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
lowerCAmelCase = count_inversions_bf(snake_case )
lowerCAmelCase , lowerCAmelCase = count_inversions_recursive(snake_case )
assert num_inversions_bf == num_inversions_recursive == 8
print('number of inversions = ' , snake_case )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
lowerCAmelCase = count_inversions_bf(snake_case )
lowerCAmelCase , lowerCAmelCase = count_inversions_recursive(snake_case )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , snake_case )
# an empty list should also have zero inversions
lowerCAmelCase = []
lowerCAmelCase = count_inversions_bf(snake_case )
lowerCAmelCase , lowerCAmelCase = count_inversions_recursive(snake_case )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , snake_case )
if __name__ == "__main__":
main()
| 514
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a__ : Union[str, Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCamelCase =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_lowerCamelCase ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_lowerCamelCase ={
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __snake_case ( self : Dict ):
UpperCAmelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
UpperCAmelCase = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] )
UpperCAmelCase = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(a__ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
UpperCAmelCase = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
# Legacy behavior
UpperCAmelCase = text_classifier('''This is great !''' , return_all_scores=a__ )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
UpperCAmelCase = text_classifier('''This is great !''' , return_all_scores=a__ )
self.assertEqual(
nested_simplify(a__ ) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] )
UpperCAmelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=a__ )
self.assertEqual(
nested_simplify(a__ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
UpperCAmelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=a__ )
self.assertEqual(
nested_simplify(a__ ) , [
{'''label''': '''LABEL_0''', '''score''': 0.504},
{'''label''': '''LABEL_0''', '''score''': 0.504},
] , )
@require_torch
def __snake_case ( self : Union[str, Any] ):
import torch
UpperCAmelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@require_tf
def __snake_case ( self : str ):
UpperCAmelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@slow
@require_torch
def __snake_case ( self : Any ):
UpperCAmelCase = pipeline('''text-classification''' )
UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
UpperCAmelCase = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
UpperCAmelCase = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
@slow
@require_tf
def __snake_case ( self : str ):
UpperCAmelCase = pipeline('''text-classification''' , framework='''tf''' )
UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
UpperCAmelCase = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
UpperCAmelCase = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
def __snake_case ( self : Optional[Any] , a__ : Tuple , a__ : List[Any] , a__ : Any ):
UpperCAmelCase = TextClassificationPipeline(model=a__ , tokenizer=a__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __snake_case ( self : Any , a__ : int , a__ : Tuple ):
UpperCAmelCase = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
UpperCAmelCase = '''HuggingFace is in'''
UpperCAmelCase = text_classifier(a__ )
self.assertEqual(nested_simplify(a__ ) , [{'''label''': ANY(a__ ), '''score''': ANY(a__ )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
UpperCAmelCase = ['''HuggingFace is in ''', '''Paris is in France''']
UpperCAmelCase = text_classifier(a__ )
self.assertEqual(
nested_simplify(a__ ) , [{'''label''': ANY(a__ ), '''score''': ANY(a__ )}, {'''label''': ANY(a__ ), '''score''': ANY(a__ )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
UpperCAmelCase = text_classifier(a__ , top_k=a__ )
UpperCAmelCase = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(a__ ) , [[{'''label''': ANY(a__ ), '''score''': ANY(a__ )}] * N, [{'''label''': ANY(a__ ), '''score''': ANY(a__ )}] * N] , )
UpperCAmelCase = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
UpperCAmelCase = text_classifier(a__ )
self.assertEqual(
nested_simplify(a__ ) , {'''label''': ANY(a__ ), '''score''': ANY(a__ )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
        # This might be used as a text pair, but the tokenizer + pipeline interaction
        # makes it hard to understand that the pair is not being used properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead, as it was producing wrong outputs.
UpperCAmelCase = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(a__ ):
text_classifier(a__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
UpperCAmelCase = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(a__ ) , [{'''label''': ANY(a__ ), '''score''': ANY(a__ )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
| 51
|
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) through C(upper_limit).

    >>> catalan_numbers(5)
    [1, 1, 2, 5, 14, 42]
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i - j - 1)) for j = 0 .. i - 1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
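# Sanity check against the closed form C(n) = comb(2n, n) // (n + 1) (illustrative,
# not part of the original script; math.comb needs Python 3.8+):
#   from math import comb
#   assert catalan_numbers(5) == [comb(2 * n, n) // (n + 1) for n in range(6)]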
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 530
| 0
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__A : List[str] = logging.getLogger(__name__)
class __UpperCamelCase ( lowercase__ ):
lowercase : str = 'summarization'
lowercase : Tuple = ['loss']
lowercase : Dict = ROUGE_KEYS
lowercase : Optional[Any] = 'rouge2'
def __init__( self :Optional[int] ,_UpperCamelCase :int ,**_UpperCamelCase :Any ):
if hparams.sortish_sampler and hparams.gpus > 1:
snake_case_ : Union[str, Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(_UpperCamelCase ,num_labels=_UpperCamelCase ,mode=self.mode ,**_UpperCamelCase )
use_task_specific_params(self.model ,"""summarization""" )
save_git_info(self.hparams.output_dir )
snake_case_ : Dict = Path(self.output_dir ) / """metrics.json"""
snake_case_ : str = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams ,self.hparams_save_path )
snake_case_ : Tuple = 0
snake_case_ : Tuple = defaultdict(_UpperCamelCase )
snake_case_ : Tuple = self.config.model_type
snake_case_ : str = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
snake_case_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
snake_case_ : int = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
snake_case_ : Optional[Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
snake_case_ : Optional[int] = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
snake_case_ : Union[str, Any] = get_git_info()["""repo_sha"""]
snake_case_ : Optional[int] = hparams.num_workers
snake_case_ : List[Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,_UpperCamelCase ):
snake_case_ : Any = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
snake_case_ : Union[str, Any] = self.decoder_start_token_id
snake_case_ : Optional[int] = (
SeqaSeqDataset if hasattr(self.tokenizer ,"""prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
snake_case_ : Tuple = False
snake_case_ : List[str] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
snake_case_ : Union[str, Any] = self.hparams.eval_max_gen_length
else:
snake_case_ : Any = self.model.config.max_length
snake_case_ : int = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def a__ ( self :Union[str, Any] ,_UpperCamelCase :Dict[str, torch.Tensor] ):
snake_case_ : List[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(_UpperCamelCase ,Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir ) / """tok_batch.json""" )
snake_case_ : int = True
return readable_batch
def a__ ( self :Union[str, Any] ,_UpperCamelCase :str ,**_UpperCamelCase :List[str] ):
return self.model(_UpperCamelCase ,**_UpperCamelCase )
def a__ ( self :Tuple ,_UpperCamelCase :List[int] ):
snake_case_ : Tuple = self.tokenizer.batch_decode(
_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
return lmap(str.strip ,_UpperCamelCase )
def a__ ( self :Dict ,_UpperCamelCase :dict ):
snake_case_ : int = self.tokenizer.pad_token_id
snake_case_ , snake_case_ : Dict = batch["""input_ids"""], batch["""attention_mask"""]
snake_case_ : Any = batch["""labels"""]
if isinstance(self.model ,_UpperCamelCase ):
snake_case_ : str = self.model._shift_right(_UpperCamelCase )
else:
snake_case_ : List[str] = shift_tokens_right(_UpperCamelCase ,_UpperCamelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
snake_case_ : Any = decoder_input_ids
self.save_readable_batch(_UpperCamelCase )
snake_case_ : Optional[int] = self(_UpperCamelCase ,attention_mask=_UpperCamelCase ,decoder_input_ids=_UpperCamelCase ,use_cache=_UpperCamelCase )
snake_case_ : Union[str, Any] = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
snake_case_ : Any = nn.CrossEntropyLoss(ignore_index=_UpperCamelCase )
assert lm_logits.shape[-1] == self.vocab_size
snake_case_ : List[str] = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1] ) ,tgt_ids.view(-1 ) )
else:
snake_case_ : Optional[Any] = nn.functional.log_softmax(_UpperCamelCase ,dim=-1 )
snake_case_ , snake_case_ : Optional[Any] = label_smoothed_nll_loss(
_UpperCamelCase ,_UpperCamelCase ,self.hparams.label_smoothing ,ignore_index=_UpperCamelCase )
return (loss,)
@property
def a__ ( self :Union[str, Any] ):
return self.tokenizer.pad_token_id
def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :Union[str, Any] ):
snake_case_ : Optional[Any] = self._step(_UpperCamelCase )
snake_case_ : int = dict(zip(self.loss_names ,_UpperCamelCase ) )
# tokens per batch
snake_case_ : List[str] = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
snake_case_ : List[str] = batch["""input_ids"""].shape[0]
snake_case_ : Dict = batch["""input_ids"""].eq(self.pad ).sum()
snake_case_ : Any = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def a__ ( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :int ):
return self._generative_step(_UpperCamelCase )
def a__ ( self :Dict ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Union[str, Any]="val" ):
self.step_count += 1
snake_case_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
snake_case_ : Optional[Any] = losses["""loss"""]
snake_case_ : str = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
snake_case_ : Union[str, Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
snake_case_ : torch.FloatTensor = torch.tensor(_UpperCamelCase ).type_as(_UpperCamelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_UpperCamelCase )
snake_case_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
snake_case_ : Union[str, Any] = self.step_count
self.metrics[prefix].append(_UpperCamelCase ) # callback writes this to self.metrics_save_path
snake_case_ : Dict = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def a__ ( self :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :int ):
return calculate_rouge(_UpperCamelCase ,_UpperCamelCase )
def a__ ( self :Optional[int] ,_UpperCamelCase :dict ):
snake_case_ : Union[str, Any] = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
snake_case_ : Dict = self.model.generate(
batch["""input_ids"""] ,attention_mask=batch["""attention_mask"""] ,use_cache=_UpperCamelCase ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,)
snake_case_ : Dict = (time.time() - ta) / batch["""input_ids"""].shape[0]
snake_case_ : List[str] = self.ids_to_clean_text(_UpperCamelCase )
snake_case_ : List[str] = self.ids_to_clean_text(batch["""labels"""] )
snake_case_ : Optional[Any] = self._step(_UpperCamelCase )
snake_case_ : Optional[Any] = dict(zip(self.loss_names ,_UpperCamelCase ) )
snake_case_ : Dict = self.calc_generative_metrics(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : List[str] = np.mean(lmap(_UpperCamelCase ,_UpperCamelCase ) )
base_metrics.update(gen_time=_UpperCamelCase ,gen_len=_UpperCamelCase ,preds=_UpperCamelCase ,target=_UpperCamelCase ,**_UpperCamelCase )
return base_metrics
def a__ ( self :List[str] ,_UpperCamelCase :List[str] ,_UpperCamelCase :List[str] ):
return self._generative_step(_UpperCamelCase )
def a__ ( self :List[Any] ,_UpperCamelCase :Tuple ):
return self.validation_epoch_end(_UpperCamelCase ,prefix="""test""" )
def a__ ( self :int ,_UpperCamelCase :Tuple ):
snake_case_ : Dict = self.n_obs[type_path]
snake_case_ : int = self.target_lens[type_path]
snake_case_ : int = self.dataset_class(
self.tokenizer ,type_path=_UpperCamelCase ,n_obs=_UpperCamelCase ,max_target_length=_UpperCamelCase ,**self.dataset_kwargs ,)
return dataset
def a__ ( self :Optional[int] ,_UpperCamelCase :str ,_UpperCamelCase :int ,_UpperCamelCase :bool = False ):
snake_case_ : int = self.get_dataset(_UpperCamelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
snake_case_ : int = dataset.make_sortish_sampler(_UpperCamelCase ,distributed=self.hparams.gpus > 1 )
return DataLoader(
_UpperCamelCase ,batch_size=_UpperCamelCase ,collate_fn=dataset.collate_fn ,shuffle=_UpperCamelCase ,num_workers=self.num_workers ,sampler=_UpperCamelCase ,)
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
snake_case_ : Any = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1 )
return DataLoader(
_UpperCamelCase ,batch_sampler=_UpperCamelCase ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,)
else:
return DataLoader(
_UpperCamelCase ,batch_size=_UpperCamelCase ,collate_fn=dataset.collate_fn ,shuffle=_UpperCamelCase ,num_workers=self.num_workers ,sampler=_UpperCamelCase ,)
def a__ ( self :Optional[Any] ):
snake_case_ : Dict = self.get_dataloader("""train""" ,batch_size=self.hparams.train_batch_size ,shuffle=_UpperCamelCase )
return dataloader
def a__ ( self :str ):
return self.get_dataloader("""val""" ,batch_size=self.hparams.eval_batch_size )
def a__ ( self :Union[str, Any] ):
return self.get_dataloader("""test""" ,batch_size=self.hparams.eval_batch_size )
@staticmethod
def a__ ( _UpperCamelCase :Any ,_UpperCamelCase :Union[str, Any] ):
BaseTransformer.add_model_specific_args(_UpperCamelCase ,_UpperCamelCase )
add_generic_args(_UpperCamelCase ,_UpperCamelCase )
parser.add_argument(
"""--max_source_length""" ,default=1_0_2_4 ,type=_UpperCamelCase ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--max_target_length""" ,default=5_6 ,type=_UpperCamelCase ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--val_max_target_length""" ,default=1_4_2 ,type=_UpperCamelCase ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--test_max_target_length""" ,default=1_4_2 ,type=_UpperCamelCase ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument("""--freeze_encoder""" ,action="""store_true""" )
parser.add_argument("""--freeze_embeds""" ,action="""store_true""" )
parser.add_argument("""--sortish_sampler""" ,action="""store_true""" ,default=_UpperCamelCase )
parser.add_argument("""--overwrite_output_dir""" ,action="""store_true""" ,default=_UpperCamelCase )
parser.add_argument("""--max_tokens_per_batch""" ,type=_UpperCamelCase ,default=_UpperCamelCase )
parser.add_argument("""--logger_name""" ,type=_UpperCamelCase ,choices=["""default""", """wandb""", """wandb_shared"""] ,default="""default""" )
parser.add_argument("""--n_train""" ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" ,type=_UpperCamelCase ,default=5_0_0 ,required=_UpperCamelCase ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help="""# examples. -1 means use all.""" )
        parser.add_argument(
            """--task""" ,type=_UpperCamelCase ,default="""summarization""" ,required=_UpperCamelCase ,help="""Task to run: summarization or translation.""" )
parser.add_argument("""--label_smoothing""" ,type=_UpperCamelCase ,default=0.0 ,required=_UpperCamelCase )
parser.add_argument("""--src_lang""" ,type=_UpperCamelCase ,default="""""" ,required=_UpperCamelCase )
parser.add_argument("""--tgt_lang""" ,type=_UpperCamelCase ,default="""""" ,required=_UpperCamelCase )
parser.add_argument("""--eval_beams""" ,type=_UpperCamelCase ,default=_UpperCamelCase ,required=_UpperCamelCase )
parser.add_argument(
"""--val_metric""" ,type=_UpperCamelCase ,default=_UpperCamelCase ,required=_UpperCamelCase ,choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" ,type=_UpperCamelCase ,default=_UpperCamelCase ,help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" ,type=_UpperCamelCase ,default=1 ,required=_UpperCamelCase ,help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) ,)
return parser
class __UpperCamelCase ( lowercase__ ):
lowercase : Optional[Any] = 'translation'
lowercase : List[Any] = ['loss']
lowercase : Optional[int] = ['bleu']
lowercase : List[Any] = 'bleu'
def __init__( self :Optional[int] ,_UpperCamelCase :str ,**_UpperCamelCase :int ):
super().__init__(_UpperCamelCase ,**_UpperCamelCase )
snake_case_ : Optional[Any] = hparams.src_lang
snake_case_ : str = hparams.tgt_lang
def a__ ( self :Any ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Union[str, Any] ):
return calculate_bleu(_UpperCamelCase ,_UpperCamelCase )
def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Optional[Any]=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=lowerCamelCase_ )
check_output_dir(lowerCamelCase_ , expected_items=3 )
if model is None:
if "summarization" in args.task:
snake_case_ : SummarizationModule = SummarizationModule(lowerCamelCase_ )
else:
snake_case_ : SummarizationModule = TranslationModule(lowerCamelCase_ )
snake_case_ : List[Any] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
snake_case_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
snake_case_ : Tuple = os.environ.get("""WANDB_PROJECT""" , lowerCamelCase_ )
snake_case_ : List[Any] = WandbLogger(name=model.output_dir.name , project=lowerCamelCase_ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
snake_case_ : str = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
snake_case_ : Optional[Any] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = args.val_metric == """loss"""
snake_case_ : pl.Trainer = generic_train(
lowerCamelCase_ , lowerCamelCase_ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , lowerCamelCase_ ) , early_stopping_callback=lowerCamelCase_ , logger=lowerCamelCase_ , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
snake_case_ : List[str] = """"""
snake_case_ : Tuple = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=lowerCamelCase_ ) )
if checkpoints:
snake_case_ : Optional[int] = checkpoints[-1]
snake_case_ : str = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
__A : str = pl.Trainer.add_argparse_args(parser)
__A : Optional[int] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__A : Optional[Any] = parser.parse_args()
main(args)
| 267
|
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):
'''simple docstring'''
snake_case_ : Tuple = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):
'''simple docstring'''
snake_case_ , snake_case_ : Dict = emb.weight.shape
snake_case_ : str = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
snake_case_ : str = emb.weight.data
return lin_layer
def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str]=None ):
'''simple docstring'''
snake_case_ : Union[str, Any] = {}
for old_key in state_dict.keys():
snake_case_ : Optional[int] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
snake_case_ : Dict = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
snake_case_ : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
snake_case_ : List[str] = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
snake_case_ : Any = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
snake_case_ : str = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
snake_case_ : str = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
snake_case_ : List[str] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
snake_case_ : Dict = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
snake_case_ : Dict = state_dict[old_key]
return new_dict
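# Illustrative example (hypothetical key, assuming the mapping above): with expert_idx=7,
# a fairseq name such as "decoder.layers.3.moe_layer.experts.0.fc1.weight" is rewritten
# to "decoder.layers.3.ffn.experts.expert_7.fc1.weight".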
def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :str = WEIGHTS_NAME ):
'''simple docstring'''
snake_case_ : Tuple = []
snake_case_ : Dict = 0
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
for expert in range(lowerCamelCase_ ):
snake_case_ : Optional[Any] = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(lowerCamelCase_ ):
snake_case_ : List[Any] = torch.load(lowerCamelCase_ )["""model"""]
remove_ignore_keys_(lowerCamelCase_ )
snake_case_ : List[str] = rename_fairseq_keys(lowerCamelCase_ , lowerCamelCase_ )
snake_case_ : List[str] = os.path.join(
lowerCamelCase_ , weights_name.replace(""".bin""" , F'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
torch.save(lowerCamelCase_ , lowerCamelCase_ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(lowerCamelCase_ )[0]].dtype )
# Add the last block
snake_case_ : Tuple = os.path.join(lowerCamelCase_ , weights_name.replace(""".bin""" , F'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
snake_case_ : Tuple = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(lowerCamelCase_ )
snake_case_ : Tuple = rename_fairseq_keys(lowerCamelCase_ , lowerCamelCase_ )
snake_case_ : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(lowerCamelCase_ ) == 1:
snake_case_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
torch.save(lowerCamelCase_ , lowerCamelCase_ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(lowerCamelCase_ , lowerCamelCase_ )
# Otherwise, let's build the index
snake_case_ : str = {}
for idx, shard in enumerate(lowerCamelCase_ ):
snake_case_ : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(lowerCamelCase_ ):05d}.bin''' )
snake_case_ : Optional[int] = os.path.join(lowerCamelCase_ , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
for key in shard:
snake_case_ : Optional[int] = shard_file
# Add the metadata
snake_case_ : Any = {"""total_size""": total_size}
snake_case_ : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , """w""" , encoding="""utf-8""" ) as f:
snake_case_ : List[str] = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + """\n"""
f.write(lowerCamelCase_ )
return metadata, index
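# The index built above mirrors the Transformers sharded-checkpoint layout:
# {"metadata": {"total_size": <bytes>}, "weight_map": {<param name>: <shard file>, ...}},
# which is what WEIGHTS_INDEX_NAME files normally contain.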
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__A : List[str] = parser.parse_args()
__A, __A : List[Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__A : Tuple = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__A : List[str] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 267
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = LxmertTokenizer
__snake_case = LxmertTokenizerFast
__snake_case = True
__snake_case = True
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
super().setUp()
A__ : Any =[
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
A__ : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowercase__ ( self : int , lowerCAmelCase_ : List[Any] ) -> int:
'''simple docstring'''
A__ : str ="""UNwant\u00E9d,running"""
A__ : List[Any] ="""unwanted, running"""
return input_text, output_text
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : Tuple =self.tokenizer_class(self.vocab_file )
A__ : Tuple =tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCAmelCase_ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A__ : List[Any] =self.get_tokenizer()
A__ : Tuple =self.get_rust_tokenizer()
A__ : Dict ="""I was born in 92000, and this is falsé."""
A__ : List[Any] =tokenizer.tokenize(lowerCAmelCase_ )
A__ : str =rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Union[str, Any] =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
A__ : List[Any] =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Optional[Any] =self.get_rust_tokenizer()
A__ : Dict =tokenizer.encode(lowerCAmelCase_ )
A__ : Tuple =rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 215
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__snake_case : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Dict = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
__snake_case : str = {
'unc-nlp/lxmert-base-uncased': 512,
}
__snake_case : int = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_INIT_CONFIGURATION
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = LxmertTokenizer
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Optional[Any]="[UNK]" , lowerCAmelCase_ : List[Any]="[SEP]" , lowerCAmelCase_ : Dict="[PAD]" , lowerCAmelCase_ : Optional[int]="[CLS]" , lowerCAmelCase_ : int="[MASK]" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Dict=None , **lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : List[str] =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase_ ) != tokenize_chinese_chars
):
A__ : Tuple =getattr(lowerCAmelCase_ , normalizer_state.pop("""type""" ) )
A__ : Optional[int] =do_lower_case
A__ : List[str] =strip_accents
A__ : str =tokenize_chinese_chars
A__ : int =normalizer_class(**lowerCAmelCase_ )
A__ : Dict =do_lower_case
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict=None ) -> Optional[int]:
'''simple docstring'''
A__ : int =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ : Tuple =[self.sep_token_id]
A__ : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
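    # Illustrative output of the two helpers above (BERT-style conventions): a single
    # sequence becomes [CLS] A [SEP] with token type ids all 0; a pair becomes
    # [CLS] A [SEP] B [SEP] with 0s over the first segment and 1s over the second.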
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
A__ : Union[str, Any] =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
| 215
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase , unittest.TestCase):
_a = MgpstrTokenizer
_a = False
_a = {}
_a = False
def SCREAMING_SNAKE_CASE ( self: Tuple ):
super().setUp()
# fmt: off
lowercase :List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase :Tuple = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
lowercase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
def SCREAMING_SNAKE_CASE ( self: Dict , **_lowerCAmelCase: Optional[Any] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Any ):
lowercase :Any = "tester"
lowercase :Dict = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
lowercase :str = self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
lowercase :str = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase :Optional[Any] = tokenizer.encode([special_token] , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
lowercase :Any = tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
lowercase :List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
lowercase :int = self.get_input_output_texts(_lowerCAmelCase )
lowercase :Optional[Any] = tokenizer.tokenize(_lowerCAmelCase )
lowercase :int = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
lowercase :Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
lowercase :Optional[Any] = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertNotEqual(len(_lowerCAmelCase ) , 0 )
lowercase :Any = tokenizer.decode(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(text_a.replace(" " , "" ) , _lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def SCREAMING_SNAKE_CASE ( self: List[str] ):
pass
| 720
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :int = r"\w+[.]\d+"
lowercase :Tuple = re.findall(lowerCamelCase, lowerCamelCase )
for pat in pats:
lowercase :List[str] = key.replace(lowerCamelCase, "_".join(pat.split("." ) ) )
return key
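# Illustrative example (hypothetical key): a PyTorch name such as
# "down_blocks.0.attentions.1.weight" matches the r"\w+[.]\d+" pattern twice and is
# rewritten to "down_blocks_0.attentions_1.weight".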
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowercase :List[str] = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase :Optional[int] = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase :int = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase :List[str] = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase :Union[str, Any] = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowercase :List[Any] = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase :Optional[Any] = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowercase :Tuple = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase :Tuple = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase :int = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
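# Layout conventions behind the reshapes above: PyTorch conv weights are
# (out_ch, in_ch, h, w) while Flax kernels are (h, w, in_ch, out_ch), hence
# transpose(2, 3, 1, 0); PyTorch linear weights are (out, in) while Flax Dense
# kernels are (in, out), hence the .T.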
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase=42 ):
# Step 1: Convert pytorch tensor to numpy
lowercase :Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase :str = flax_model.init_weights(PRNGKey(lowerCamelCase ) )
lowercase :Tuple = flatten_dict(lowerCamelCase )
lowercase :Optional[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase :List[Any] = rename_key(lowerCamelCase )
lowercase :List[Any] = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowercase , lowercase :List[str] = rename_key_and_reshape_tensor(lowerCamelCase, lowerCamelCase, lowerCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
lowercase :List[str] = jnp.asarray(lowerCamelCase )
return unflatten_dict(lowerCamelCase )
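# Sketch of intended use (names are assumptions based on the upstream helper, which is
# usually called convert_pytorch_state_dict_to_flax):
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
#   outputs = flax_model.apply({"params": flax_params}, inputs)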
| 453
| 0
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name for a given Gregorian date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # weekday of this year's doomsday
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # doomsday date (mod 7) of the given month; century years are leap years
    # only when divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
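# Hand-checked example for the algorithm above, 2020-10-24: century_anchor = 2,
# dooms_day = (1 + 8 + 2 + 2) % 7 = 6, day_anchor = DOOMSDAY_LEAP[9] = 3,
# week_day = (6 + 24 - 3) % 7 = 6 -> 'Saturday'.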
if __name__ == "__main__":
import doctest
doctest.testmod()
| 484
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class A ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase : Any = """autoformer"""
lowerCamelCase : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : int , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "student_t" , _UpperCamelCase : str = "nll" , _UpperCamelCase : int = 1 , _UpperCamelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , _UpperCamelCase : bool = True , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : int = 64 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 32 , _UpperCamelCase : int = 32 , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 100 , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : bool = True , _UpperCamelCase : int=True , _UpperCamelCase : int = 10 , _UpperCamelCase : int = 25 , _UpperCamelCase : int = 3 , **_UpperCamelCase : Dict , ):
# time series specific configuration
_lowercase: int = prediction_length
_lowercase: Any = context_length if context_length is not None else prediction_length
_lowercase: Optional[Any] = distribution_output
_lowercase: str = loss
_lowercase: Union[str, Any] = input_size
_lowercase: str = num_time_features
_lowercase: Any = lags_sequence
_lowercase: List[str] = scaling
_lowercase: Union[str, Any] = num_dynamic_real_features
_lowercase: Tuple = num_static_real_features
_lowercase: str = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_UpperCamelCase) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`")
_lowercase: Tuple = cardinality
else:
_lowercase: Optional[int] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_UpperCamelCase) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`")
_lowercase: str = embedding_dimension
else:
_lowercase: Any = [min(50 , (cat + 1) // 2) for cat in self.cardinality]
_lowercase: int = num_parallel_samples
# Transformer architecture configuration
_lowercase: Union[str, Any] = input_size * len(self.lags_sequence) + self._number_of_features
_lowercase: Any = d_model
_lowercase: Optional[int] = encoder_attention_heads
_lowercase: Union[str, Any] = decoder_attention_heads
_lowercase: Optional[Any] = encoder_ffn_dim
_lowercase: Optional[int] = decoder_ffn_dim
_lowercase: List[str] = encoder_layers
_lowercase: str = decoder_layers
_lowercase: Union[str, Any] = dropout
_lowercase: Optional[Any] = attention_dropout
_lowercase: Union[str, Any] = activation_dropout
_lowercase: str = encoder_layerdrop
_lowercase: Optional[int] = decoder_layerdrop
_lowercase: List[Any] = activation_function
_lowercase: Optional[Any] = init_std
_lowercase: int = use_cache
# Autoformer
_lowercase: Tuple = label_length
_lowercase: Any = moving_average
_lowercase: Any = autocorrelation_factor
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase)
@property
def UpperCAmelCase__ ( self : Optional[Any]):
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 226
| 0
|
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations.

    >>> add(3, 5)
    8
    >>> add(-7, 2)
    -5
    """
    while second != 0:
        carry = first & second  # bits that would overflow into the next position
        first ^= second  # sum without the carry
        second = carry << 1  # propagate the carry one position to the left
    return first
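# Illustrative trace of the carry loop above, add(3, 5):
#   carry = 0b001, first = 0b110, second = 0b010
#   carry = 0b010, first = 0b100, second = 0b100
#   carry = 0b100, first = 0b000, second = 0b1000
#   carry = 0, first = 0b1000, second = 0 -> returns 8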
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 708
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self:Optional[Any] , _a:List[Any] , _a:Any=7 , _a:str=3 , _a:Tuple=10 , _a:str=18 , _a:List[str]=30 , _a:Tuple=4_00 , _a:str=True , _a:List[str]=None , _a:List[str]=True , _a:Optional[Any]=[0.5, 0.5, 0.5] , _a:List[str]=[0.5, 0.5, 0.5] , _a:int=None , ):
snake_case__ = size if size is not None else {'''shortest_edge''': 18}
snake_case__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = num_frames
snake_case__ = image_size
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = do_normalize
snake_case__ = image_mean
snake_case__ = image_std
snake_case__ = crop_size
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = VivitImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = VivitImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
snake_case__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=_a )
for video in video_inputs:
self.assertIsInstance(_a , _a )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
snake_case__ = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case__ = image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for video in video_inputs:
self.assertIsInstance(_a , _a )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
snake_case__ = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case__ = image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for video in video_inputs:
self.assertIsInstance(_a , _a )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
snake_case__ = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case__ = image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 208
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''blenderbot-small'''
_lowercase : List[Any] = ['''past_key_values''']
_lowercase : List[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _lowercase=50_265 , _lowercase=512 , _lowercase=8 , _lowercase=2_048 , _lowercase=16 , _lowercase=8 , _lowercase=2_048 , _lowercase=16 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=True , _lowercase=True , _lowercase="gelu" , _lowercase=512 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1 , _lowercase=False , _lowercase=0 , _lowercase=1 , _lowercase=2 , _lowercase=2 , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = d_model
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = encoder_attention_heads
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = decoder_attention_heads
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = activation_function
_lowerCAmelCase = init_std
_lowerCAmelCase = encoder_layerdrop
_lowerCAmelCase = decoder_layerdrop
_lowerCAmelCase = use_cache
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_lowerCAmelCase = {0: """batch"""}
_lowerCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
_lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
_lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowercase ):
_lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
_lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
else:
_lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super().outputs
else:
_lowerCAmelCase = super(_lowercase , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowercase ):
_lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
_lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _lowercase ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ):
"""simple docstring"""
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Generate decoder inputs
_lowerCAmelCase = seq_length if not self.use_past else 1
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
_lowerCAmelCase = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase = dict(**_lowercase , **_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["""input_ids"""].shape
_lowerCAmelCase = common_inputs["""decoder_input_ids"""].shape[1]
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = decoder_seq_length + 3
_lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_lowercase , _lowercase )] , dim=1 )
_lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase = min(_lowercase , _lowercase )
_lowerCAmelCase = max(_lowercase , _lowercase ) - min_num_layers
_lowerCAmelCase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
) )
# TODO: test this.
_lowerCAmelCase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_lowercase , _lowercase ):
common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) )
return common_inputs
def _lowercase ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ):
"""simple docstring"""
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_lowerCAmelCase = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = common_inputs["""attention_mask"""].dtype
_lowerCAmelCase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
_lowerCAmelCase = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def _lowercase ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ):
"""simple docstring"""
_lowerCAmelCase = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowercase )
_lowerCAmelCase = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase = dict(tokenizer(_lowercase , return_tensors=_lowercase ) )
return common_inputs
def _lowercase ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
elif self.task == "causal-lm":
_lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
else:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
return common_inputs
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase )
else:
_lowerCAmelCase = super(_lowercase , self )._flatten_past_key_values_(
_lowercase , _lowercase , _lowercase , _lowercase )
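
# Usage sketch (an addition, not part of the original file): this fragment matches
# the BART-style seq2seq ONNX export config in transformers, whose dummy inputs are
# typically produced like so (checkpoint name is an example, network access needed):
#
#     from transformers import AutoTokenizer, BartConfig
#     from transformers.models.bart import BartOnnxConfig
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
#     onnx_config = BartOnnxConfig(BartConfig(), task="seq2seq-lm", use_past=True)
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework="pt")
#     print(sorted(dummy))  # input_ids, attention_mask, decoder_* and past_key_values entries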
| 5
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    """Configuration for LayoutLMv3 models (text + layout + image)."""

    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self):
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self):
        return 1e-5

    @property
    def default_onnx_opset(self):
        return 12

    def generate_dummy_inputs(self, processor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40):
        # OCR must be disabled because dummy bounding boxes are supplied below
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # Generate dummy images; the batch dimension was already fixed above
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
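

# Usage sketch (an addition, not part of the original file): exercising the ONNX
# config above with the real LayoutLMv3 processor; the checkpoint name is an
# example and downloading it needs network access.
if __name__ == "__main__":
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
    onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
    dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=16, framework="pt")
    print({name: tuple(tensor.shape) for name, tensor in dummy.items()})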
| 630
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
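    # Follow-up sketch (an addition, not part of the original script): predictions
    # are still in the scaler's [0, 1] range. To recover prices, keep a reference
    # to the fitted scaler instead of discarding it above, e.g.
    #     scaler = MinMaxScaler().fit(raw_values)
    #     actual_data = scaler.transform(raw_values)
    #     ...
    #     predicted_prices = scaler.inverse_transform(pred)
    print(pred.shape)  # (number of test windows, forward_days)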
| 721
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    """Deprecated alias kept for backwards compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
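

# Migration sketch (an addition, not part of the original file): new code should
# use the image processor directly; instantiating the shim above merely emits the
# FutureWarning once and behaves identically, e.g.
#
#     feature_extractor = OwlViTFeatureExtractor()   # warns
#     image_processor = OwlViTImageProcessor()       # preferred, no warning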
| 383
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
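# Behavior note (an addition, not part of the original file): with the _LazyModule
# indirection above, `import transformers.models.mgp_str` stays cheap and the heavy
# torch-backed submodules are only imported on first attribute access, e.g.
#
#     from transformers.models.mgp_str import MgpstrProcessor  # triggers the real import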
| 12
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Zero-shot object detection pipeline: detects objects for free-text
    candidate labels supplied at call time."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        """Detect objects (bounding boxes & classes) in the image(s) passed as inputs."""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            outputs = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=outputs, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
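

# Usage sketch (an addition, not part of the original file): this pipeline is
# normally reached through the `pipeline` factory; checkpoint and image URL are
# examples and require network access.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    for prediction in predictions:
        print(prediction["label"], round(prediction["score"], 3), prediction["box"])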
| 12
| 1
|
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
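    # Worked check (an addition, not part of the original file): the shortest
    # 1 -> 4 route is 1 -> 3 -> 4 (5 + 6 = 11) and the shortest 0 -> 3 route is
    # 0 -> 2 -> 3 (9 + 7 = 16), so the two prints above show 11 and 16.
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16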
| 659
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
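

# Usage sketch (an addition, not part of the original file): Funnel gives the
# <cls> position token type id 2 (cls_token_type_id above) rather than 0. The
# checkpoint name is an example and requires network access.
if __name__ == "__main__":
    tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    ids_a = tokenizer.convert_tokens_to_ids(["hello", "world"])
    ids_b = tokenizer.convert_tokens_to_ids(["how", "are", "you"])
    print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))
    # expected: [2, 0, 0, 0, 1, 1, 1, 1]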
| 659
| 1
|
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix, the length of the longest proper prefix that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
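
    # Worked illustration (an addition, not part of the original file): the failure
    # array of "ABABX" is [0, 0, 1, 2, 0]; after matching "ABAB" and failing on "X",
    # kmp() resumes comparing at pattern index failure[3] = 2 instead of index 0.
    assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]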
| 564
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VQModel
__SCREAMING_SNAKE_CASE = """sample"""
@property
def snake_case_ ( self , _snake_case=(32, 32) ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = 4
UpperCAmelCase = 3
UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case )
return {"sample": image}
@property
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
return (3, 32, 32)
@property
def snake_case_ ( self ) -> int:
"""simple docstring"""
return (3, 32, 32)
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def snake_case_ ( self ) -> int:
"""simple docstring"""
pass
def snake_case_ ( self ) -> int:
"""simple docstring"""
pass
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_snake_case )
UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(_snake_case ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
UpperCAmelCase = image.to(_snake_case )
with torch.no_grad():
UpperCAmelCase = model(_snake_case ).sample
UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-3 ) )
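

# Usage sketch (an addition, not part of the original file): the round trip the
# tests above exercise, written out directly; the dummy checkpoint requires
# network access.
if __name__ == "__main__":
    vq = VQModel.from_pretrained("fusing/vqgan-dummy").eval()
    sample = torch.randn(1, vq.config.in_channels, vq.config.sample_size, vq.config.sample_size)
    with torch.no_grad():
        reconstruction = vq(sample).sample  # encode -> vector-quantize -> decode
    print(reconstruction.shape)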
| 254
| 0
|
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Optional[int] = '''token-classification'''
def __init__( self : str , lowerCAmelCase__ : str ):
"""simple docstring"""
if type(lowerCAmelCase__ ) == dict:
__SCREAMING_SNAKE_CASE : List[Any] = Namespace(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = import_module("""tasks""" )
try:
__SCREAMING_SNAKE_CASE : List[Any] = getattr(lowerCAmelCase__ , hparams.task_type )
__SCREAMING_SNAKE_CASE : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
__SCREAMING_SNAKE_CASE : int = self.token_classification_task.get_labels(hparams.labels )
__SCREAMING_SNAKE_CASE : Dict = CrossEntropyLoss().ignore_index
super().__init__(lowerCAmelCase__ , len(self.labels ) , self.mode )
def UpperCamelCase__ ( self : Any , **lowerCAmelCase__ : List[str] ):
"""simple docstring"""
return self.model(**lowerCAmelCase__ )
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
__SCREAMING_SNAKE_CASE : int = self(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.hparams
for mode in ["train", "dev", "test"]:
__SCREAMING_SNAKE_CASE : int = self._feature_file(lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = torch.load(lowerCAmelCase__ )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
__SCREAMING_SNAKE_CASE : Dict = self.token_classification_task.read_examples_from_file(args.data_dir , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = self.token_classification_task.convert_examples_to_features(
lowerCAmelCase__ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowerCAmelCase__ , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , lowerCAmelCase__ )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self._feature_file(lowerCAmelCase__ )
logger.info("""Loading features from cached file %s""" , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = torch.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__SCREAMING_SNAKE_CASE : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
__SCREAMING_SNAKE_CASE : Any = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , batch_size=lowerCAmelCase__ )
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
"""Compute validation""" ""
__SCREAMING_SNAKE_CASE : int = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
__SCREAMING_SNAKE_CASE : Optional[int] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
__SCREAMING_SNAKE_CASE : Dict = self(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[:2]
__SCREAMING_SNAKE_CASE : List[Any] = logits.detach().cpu().numpy()
__SCREAMING_SNAKE_CASE : List[str] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
__SCREAMING_SNAKE_CASE : str = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
__SCREAMING_SNAKE_CASE : int = np.argmax(lowerCAmelCase__ , axis=2 )
__SCREAMING_SNAKE_CASE : int = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
__SCREAMING_SNAKE_CASE : Any = dict(enumerate(self.labels ) )
__SCREAMING_SNAKE_CASE : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
__SCREAMING_SNAKE_CASE : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
__SCREAMING_SNAKE_CASE : List[Any] = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(lowerCAmelCase__ , lowerCAmelCase__ ),
"""precision""": precision_score(lowerCAmelCase__ , lowerCAmelCase__ ),
"""recall""": recall_score(lowerCAmelCase__ , lowerCAmelCase__ ),
"""f1""": fa_score(lowerCAmelCase__ , lowerCAmelCase__ ),
}
__SCREAMING_SNAKE_CASE : Dict = dict(results.items() )
__SCREAMING_SNAKE_CASE : Tuple = results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self : Union[str, Any] , lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._eval_end(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self._eval_end(lowerCAmelCase__ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
__SCREAMING_SNAKE_CASE : Any = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowerCAmelCase__ , lowerCAmelCase__ )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=lowerCAmelCase__ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=1_2_8 , type=lowerCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=lowerCAmelCase__ , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=lowerCAmelCase__ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase__ : List[Any] = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : Any = NERTransformer(args)
UpperCamelCase__ : Optional[Any] = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase__ : List[str] = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCamelCase__ : Tuple = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 718
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : Optional[Any] = '''maskformer'''
_A : Optional[int] = {'''hidden_size''': '''mask_feature_size'''}
_A : int = ['''resnet''', '''swin''']
_A : Any = ['''detr''']
def __init__( self : Any , lowerCAmelCase__ : int = 2_5_6 , lowerCAmelCase__ : int = 2_5_6 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[Dict] = None , lowerCAmelCase__ : Optional[Dict] = None , lowerCAmelCase__ : float = 0.02 , lowerCAmelCase__ : float = 1.0 , lowerCAmelCase__ : float = 1.0 , lowerCAmelCase__ : float = 1.0 , lowerCAmelCase__ : float = 20.0 , lowerCAmelCase__ : Optional[bool] = None , **lowerCAmelCase__ : Any , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__SCREAMING_SNAKE_CASE : Any = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Dict = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE : Union[str, Any] = config_class.from_dict(lowerCAmelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__SCREAMING_SNAKE_CASE : List[str] = DetrConfig()
else:
# verify that the decoder is supported
__SCREAMING_SNAKE_CASE : List[Any] = (
decoder_config.pop("""model_type""" ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : int = CONFIG_MAPPING[decoder_type]
__SCREAMING_SNAKE_CASE : Dict = config_class.from_dict(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = backbone_config
__SCREAMING_SNAKE_CASE : Optional[int] = decoder_config
# main feature dimension for the model
__SCREAMING_SNAKE_CASE : List[str] = fpn_feature_size
__SCREAMING_SNAKE_CASE : int = mask_feature_size
# initializer
__SCREAMING_SNAKE_CASE : str = init_std
__SCREAMING_SNAKE_CASE : Optional[Any] = init_xavier_std
# Hungarian matcher && loss
__SCREAMING_SNAKE_CASE : Tuple = cross_entropy_weight
__SCREAMING_SNAKE_CASE : Dict = dice_weight
__SCREAMING_SNAKE_CASE : Union[str, Any] = mask_weight
__SCREAMING_SNAKE_CASE : Optional[Any] = use_auxiliary_loss
__SCREAMING_SNAKE_CASE : Union[str, Any] = no_object_weight
__SCREAMING_SNAKE_CASE : Dict = output_auxiliary_logits
__SCREAMING_SNAKE_CASE : Any = self.decoder_config.encoder_attention_heads
__SCREAMING_SNAKE_CASE : int = self.decoder_config.num_hidden_layers
super().__init__(**lowerCAmelCase__ )
@classmethod
def UpperCamelCase__ ( cls : List[Any] , lowerCAmelCase__ : PretrainedConfig , lowerCAmelCase__ : PretrainedConfig , **lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
return cls(
backbone_config=lowerCAmelCase__ , decoder_config=lowerCAmelCase__ , **lowerCAmelCase__ , )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE : Dict = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE : str = self.decoder_config.to_dict()
__SCREAMING_SNAKE_CASE : List[str] = self.__class__.model_type
return output
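

# Usage sketch (an addition, not part of the original file): composing a config
# from explicit backbone and decoder configs through the classmethod above; the
# names follow the upstream transformers API.
if __name__ == "__main__":
    from transformers import DetrConfig, MaskFormerConfig, SwinConfig

    config = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())
    print(config.backbone_config.model_type, config.decoder_config.model_type)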
| 178
| 0
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( UpperCAmelCase_ ):
lowercase_ : Union[str, Any] = (CMStochasticIterativeScheduler,)
lowercase_ : Optional[Any] = 10
def a ( self : Dict , **lowerCamelCase__ : str ):
"""simple docstring"""
__UpperCamelCase : List[Any] = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase__ )
return config
def a ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase : Tuple = 10
__UpperCamelCase : Tuple = self.get_scheduler_config()
__UpperCamelCase : Any = self.scheduler_classes[0](**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] = scheduler.timesteps[0]
__UpperCamelCase : Dict = scheduler.timesteps[1]
__UpperCamelCase : List[str] = self.dummy_sample
__UpperCamelCase : Optional[int] = 0.1 * sample
__UpperCamelCase : List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
__UpperCamelCase : Any = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def a ( self : Dict ):
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def a ( self : Dict ):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase__ )
def a ( self : List[Any] ):
"""simple docstring"""
__UpperCamelCase : int = self.scheduler_classes[0]
__UpperCamelCase : Tuple = self.get_scheduler_config()
__UpperCamelCase : str = scheduler_class(**lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] = 1
scheduler.set_timesteps(lowerCamelCase__ )
__UpperCamelCase : Dict = scheduler.timesteps
__UpperCamelCase : Tuple = torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = self.dummy_model()
__UpperCamelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase__ ):
# 1. scale model input
__UpperCamelCase : Union[str, Any] = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
# 2. predict noise residual
__UpperCamelCase : Dict = model(lowerCamelCase__ , lowerCamelCase__ )
# 3. predict previous sample x_t-1
__UpperCamelCase : int = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ ).prev_sample
__UpperCamelCase : Dict = pred_prev_sample
__UpperCamelCase : Dict = torch.sum(torch.abs(lowerCamelCase__ ) )
__UpperCamelCase : List[str] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def a ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase : Tuple = self.scheduler_classes[0]
__UpperCamelCase : Optional[Any] = self.get_scheduler_config()
__UpperCamelCase : Optional[Any] = scheduler_class(**lowerCamelCase__ )
__UpperCamelCase : Optional[Any] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase__ )
__UpperCamelCase : int = scheduler.timesteps
__UpperCamelCase : List[str] = torch.manual_seed(0 )
__UpperCamelCase : List[str] = self.dummy_model()
__UpperCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__UpperCamelCase : Any = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
# 2. predict noise residual
__UpperCamelCase : int = model(lowerCamelCase__ , lowerCamelCase__ )
# 3. predict previous sample x_t-1
__UpperCamelCase : Union[str, Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ ).prev_sample
__UpperCamelCase : Dict = pred_prev_sample
__UpperCamelCase : List[Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
__UpperCamelCase : Tuple = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def a ( self : Dict ):
"""simple docstring"""
__UpperCamelCase : List[Any] = self.scheduler_classes[0]
__UpperCamelCase : Dict = self.get_scheduler_config()
__UpperCamelCase : Optional[int] = scheduler_class(**lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase__ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase__ )
def a ( self : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = self.scheduler_classes[0]
__UpperCamelCase : Tuple = self.get_scheduler_config()
__UpperCamelCase : str = scheduler_class(**lowerCamelCase__ )
__UpperCamelCase : Optional[int] = [39, 30, 12, 1, 0]
__UpperCamelCase : str = len(lowerCamelCase__ )
with self.assertRaises(lowerCamelCase__ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase__ , timesteps=lowerCamelCase__ )
def a ( self : int ):
"""simple docstring"""
__UpperCamelCase : int = self.scheduler_classes[0]
__UpperCamelCase : List[str] = self.get_scheduler_config()
__UpperCamelCase : Any = scheduler_class(**lowerCamelCase__ )
__UpperCamelCase : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCamelCase__ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase__ )
| 269
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCamelCase = logging.getLogger(__name__)
class _A ( UpperCAmelCase_ ):
def a ( self : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str=None , lowerCamelCase__ : int=None ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = self.layer[current_layer](lowerCamelCase__ , lowerCamelCase__ , head_mask[current_layer] )
__UpperCamelCase : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , UpperCAmelCase_ , )
class _A ( UpperCAmelCase_ ):
def __init__( self : Tuple , lowerCamelCase__ : Tuple ):
"""simple docstring"""
super().__init__(lowerCamelCase__ )
__UpperCamelCase : List[str] = BertEncoderWithPabee(lowerCamelCase__ )
self.init_weights()
__UpperCamelCase : Optional[int] = 0
__UpperCamelCase : Tuple = 0
__UpperCamelCase : Any = 0
__UpperCamelCase : Optional[Any] = 0
def a ( self : Union[str, Any] , lowerCamelCase__ : Any ):
"""simple docstring"""
__UpperCamelCase : List[Any] = threshold
def a ( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase : int = patience
def a ( self : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : int = 0
__UpperCamelCase : Tuple = 0
def a ( self : int ):
"""simple docstring"""
__UpperCamelCase : int = self.inference_layers_num / self.inference_instances_num
__UpperCamelCase : str = (
f'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
f' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
)
print(lowerCamelCase__ )
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
def a ( self : Any , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : str=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : str=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
__UpperCamelCase : Optional[Any] = input_ids.size()
elif inputs_embeds is not None:
__UpperCamelCase : Tuple = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
__UpperCamelCase : Tuple = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__UpperCamelCase : Optional[Any] = torch.ones(lowerCamelCase__ , device=lowerCamelCase__ )
if token_type_ids is None:
__UpperCamelCase : str = torch.zeros(lowerCamelCase__ , dtype=torch.long , device=lowerCamelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[Any] = encoder_hidden_states.size()
__UpperCamelCase : Union[str, Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__UpperCamelCase : Union[str, Any] = torch.ones(lowerCamelCase__ , device=lowerCamelCase__ )
__UpperCamelCase : Optional[int] = self.invert_attention_mask(lowerCamelCase__ )
else:
__UpperCamelCase : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__UpperCamelCase : int = self.get_head_mask(lowerCamelCase__ , self.config.num_hidden_layers )
__UpperCamelCase : Any = self.embeddings(
input_ids=lowerCamelCase__ , position_ids=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , inputs_embeds=lowerCamelCase__ )
__UpperCamelCase : Optional[int] = embedding_output
if self.training:
__UpperCamelCase : List[Any] = []
for i in range(self.config.num_hidden_layers ):
__UpperCamelCase : List[Any] = self.encoder.adaptive_forward(
lowerCamelCase__ , current_layer=lowerCamelCase__ , attention_mask=lowerCamelCase__ , head_mask=lowerCamelCase__ )
__UpperCamelCase : Tuple = self.pooler(lowerCamelCase__ )
__UpperCamelCase : str = output_layers[i](output_dropout(lowerCamelCase__ ) )
res.append(lowerCamelCase__ )
elif self.patience == 0: # Use all layers for inference
__UpperCamelCase : Union[str, Any] = self.encoder(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , head_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , )
__UpperCamelCase : Optional[int] = self.pooler(encoder_outputs[0] )
__UpperCamelCase : Union[str, Any] = [output_layers[self.config.num_hidden_layers - 1](lowerCamelCase__ )]
else:
__UpperCamelCase : Union[str, Any] = 0
__UpperCamelCase : Optional[Any] = None
__UpperCamelCase : List[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__UpperCamelCase : Union[str, Any] = self.encoder.adaptive_forward(
lowerCamelCase__ , current_layer=lowerCamelCase__ , attention_mask=lowerCamelCase__ , head_mask=lowerCamelCase__ )
__UpperCamelCase : str = self.pooler(lowerCamelCase__ )
__UpperCamelCase : Any = output_layers[i](lowerCamelCase__ )
if regression:
__UpperCamelCase : Optional[int] = logits.detach()
if patient_result is not None:
__UpperCamelCase : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__UpperCamelCase : List[str] = 0
else:
__UpperCamelCase : Optional[int] = logits.detach().argmax(dim=1 )
if patient_result is not None:
__UpperCamelCase : Union[str, Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowerCamelCase__ ) ):
patient_counter += 1
else:
__UpperCamelCase : str = 0
__UpperCamelCase : Union[str, Any] = logits
if patient_counter == self.patience:
break
__UpperCamelCase : List[str] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , UpperCAmelCase_ , )
class _A ( UpperCAmelCase_ ):
def __init__( self : Any , lowerCamelCase__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(lowerCamelCase__ )
__UpperCamelCase : Dict = config.num_labels
__UpperCamelCase : List[Any] = BertModelWithPabee(lowerCamelCase__ )
__UpperCamelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase : List[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
def a ( self : List[Any] , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Dict=None , ):
"""simple docstring"""
__UpperCamelCase : str = self.bert(
input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , position_ids=lowerCamelCase__ , head_mask=lowerCamelCase__ , inputs_embeds=lowerCamelCase__ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__UpperCamelCase : List[str] = (logits[-1],)
if labels is not None:
__UpperCamelCase : Tuple = None
__UpperCamelCase : Union[str, Any] = 0
for ix, logits_item in enumerate(lowerCamelCase__ ):
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase : Union[str, Any] = MSELoss()
__UpperCamelCase : Optional[Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase : Dict = CrossEntropyLoss()
__UpperCamelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__UpperCamelCase : Dict = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__UpperCamelCase : Tuple = (total_loss / total_weights,) + outputs
return outputs
| 269
| 1
|
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
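A quick, hypothetical sanity check (not part of the original script): by max-flow/min-cut duality, the total capacity of the returned cut edges, read off an untouched copy of the graph, equals the maximum flow, which is 23 for this classic example.

original = [row[:] for row in test_graph]          # keep pristine capacities
cut_edges = mincut([row[:] for row in test_graph], source=0, sink=5)
print(sum(original[u][v] for u, v in cut_edges))   # 23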
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        # Initialize additional lists required for the algorithm.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
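A hypothetical usage example on top of the class above; the edge list is made up, and boruvka() prints each edge it adds plus the final MST weight.

g = Graph(5)
for u, v, w in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 4, 4)]:
    g.add_edge(u, v, w)
g.boruvka()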
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
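For context, a stripped-down sketch of the lazy-import pattern this file relies on. This is illustrative only, not the real transformers._LazyModule: submodules are imported on first attribute access.

import importlib
import types


class TinyLazyModule(types.ModuleType):
    """Import a submodule only when one of its attributes is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(submodule, attr)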
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= 10)


if __name__ == "__main__":
    print(F"{solution() = }")
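As a quick check on the tile counting (not part of the original solution): an outer 7x7 square with a centred 3x3 hole uses 7*7 - 3*3 = 40 tiles, so it contributes one lamina to the count for t = 40.

assert 7 * 7 - 3 * 3 == 40  # a 7x7 lamina with a 3x3 hole uses exactly 40 tiles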
"""simple docstring"""
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __lowercase = "" , __lowercase = False ):
# Mapping from the first character of the prefix of the node
UpperCAmelCase__ = {}
# A node will be a leaf if the tree contains its word
UpperCAmelCase__ = is_leaf
UpperCAmelCase__ = prefix
def A__ ( self , __lowercase ):
UpperCAmelCase__ = 0
for q, w in zip(self.prefix , __lowercase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def A__ ( self , __lowercase ):
for word in words:
self.insert(__lowercase )
def A__ ( self , __lowercase ):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCAmelCase__ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCAmelCase__ = RadixNode(prefix=__lowercase , is_leaf=__lowercase )
else:
UpperCAmelCase__ = self.nodes[word[0]]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = incoming_node.match(
__lowercase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(__lowercase )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
UpperCAmelCase__ = remaining_prefix
UpperCAmelCase__ = self.nodes[matching_string[0]]
UpperCAmelCase__ = RadixNode(__lowercase , __lowercase )
UpperCAmelCase__ = aux_node
if remaining_word == "":
UpperCAmelCase__ = True
else:
self.nodes[matching_string[0]].insert(__lowercase )
def A__ ( self , __lowercase ):
UpperCAmelCase__ = self.nodes.get(word[0] , __lowercase )
if not incoming_node:
return False
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = incoming_node.match(
__lowercase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__lowercase )
def A__ ( self , __lowercase ):
UpperCAmelCase__ = self.nodes.get(word[0] , __lowercase )
if not incoming_node:
return False
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = incoming_node.match(
__lowercase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__lowercase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
UpperCAmelCase__ = list(self.nodes.values() )[0]
UpperCAmelCase__ = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCAmelCase__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
UpperCAmelCase__ = False
# If there is 1 edge, we merge it with its child
else:
UpperCAmelCase__ = list(incoming_node.nodes.values() )[0]
UpperCAmelCase__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCAmelCase__ = merging_node.nodes
return True
def A__ ( self , __lowercase = 0 ):
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def snake_case__ ( ) ->bool:
UpperCAmelCase__ = """banana bananas bandana band apple all beast""".split()
UpperCAmelCase__ = RadixNode()
root.insert_many(_SCREAMING_SNAKE_CASE )
assert all(root.find(_SCREAMING_SNAKE_CASE ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def snake_case__ ( ) ->None:
assert test_trie()
def snake_case__ ( ) ->None:
UpperCAmelCase__ = RadixNode()
UpperCAmelCase__ = """banana bananas bandanas bandana band apple all beast""".split()
root.insert_many(_SCREAMING_SNAKE_CASE )
print("""Words:""" , _SCREAMING_SNAKE_CASE )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
main()
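A hypothetical quick-start on top of the class above; the words are arbitrary.

root = RadixNode()
root.insert_many(["romane", "romanus", "romulus"])
print(root.find("romanus"))  # True: the full word was inserted
print(root.find("roman"))    # False: "roman" is only a shared prefix node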
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
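        # Added note on the expected values: flattened row-major, the mask is 20
        # zeros, then one run of 30 + 15 = 45 ones starting at pixel 21
        # (1-indexed), plus a 10-pixel run in row 5 -- hence 4 RLE values total.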
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    # Initialize the accelerator; it handles device placement for us.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
_snake_case : Dict = f'''{args.output_dir}/self-train_iter-{{}}'''.format
_snake_case : Any = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowercase_ )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
accelerator.wait_for_everyone()
_snake_case : Dict = None
_snake_case : str = None
_snake_case : int = 0
_snake_case : Dict = False
# Show the progress bar
_snake_case : Any = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_snake_case : Union[str, Any] = data_dir_format(lowercase_ )
assert os.path.exists(lowercase_ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_snake_case : List[Any] = os.path.join(lowercase_ , '''stage-1''' )
_snake_case : str = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowercase_ , lowercase_ ):
arguments_dict.update({key: value} )
_snake_case : List[Any] = os.path.join(lowercase_ , '''best-checkpoint''' , lowercase_ )
if os.path.exists(lowercase_ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , lowercase_ , lowercase_ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , lowercase_ )
finetune(**lowercase_ )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase_ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , lowercase_ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_snake_case : int = os.path.join(lowercase_ , '''best-checkpoint''' )
_snake_case : Any = os.path.join(lowercase_ , '''stage-2''' )
# Update arguments_dict
_snake_case : Dict = model_path
_snake_case : Union[str, Any] = data_files['''train''']
_snake_case : Optional[int] = current_output_dir
_snake_case : Dict = os.path.join(lowercase_ , '''best-checkpoint''' , lowercase_ )
if os.path.exists(lowercase_ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , lowercase_ , lowercase_ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , lowercase_ )
finetune(**lowercase_ )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase_ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , lowercase_ )
_snake_case : List[Any] = iteration
_snake_case : Any = data_dir_format(iteration + 1 )
_snake_case : Optional[int] = AutoConfig.from_pretrained(os.path.join(lowercase_ , '''best-checkpoint''' ) )
_snake_case : Union[str, Any] = config.idalabel
_snake_case : Tuple = os.path.join(lowercase_ , '''eval_results_best-checkpoint.json''' )
_snake_case : Any = os.path.join(lowercase_ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(lowercase_ )
with open(lowercase_ , '''r''' ) as f:
_snake_case : Tuple = float(json.load(lowercase_ )[args.eval_metric] )
_snake_case : List[str] = os.path.join(lowercase_ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(lowercase_ )
# Loading the dataset from local csv or json files.
_snake_case : str = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
_snake_case : Dict = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
shutil.copy(lowercase_ , os.path.join(lowercase_ , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(lowercase_ ):
shutil.copy(lowercase_ , os.path.join(lowercase_ , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.wait_for_everyone()
_snake_case : Tuple = os.path.join(lowercase_ , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_snake_case : Tuple = eval_result
if best_iteration is None:
_snake_case : Union[str, Any] = new_iteration
_snake_case : List[Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_snake_case : Optional[int] = new_iteration
_snake_case : Optional[int] = new_eval_result
_snake_case : Optional[int] = 0
else:
if new_eval_result == best_eval_result:
_snake_case : int = new_iteration
_snake_case : str = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_snake_case : Dict = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , lowercase_ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , lowercase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase_ , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowercase_ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , lowercase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase_ , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowercase_ , '''eval_results_best-iteration.json''' ) , )
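A hypothetical driver (not part of the original script) showing how the selftrain() entry point above is typically invoked; all paths and hyperparameters below are placeholders.

if __name__ == "__main__":
    selftrain(
        model_name_or_path="bert-base-uncased",
        train_file="train.csv",          # labeled seed data (placeholder)
        infer_file="unlabeled.csv",      # data to pseudo-label (placeholder)
        output_dir="self_training_output",
        max_selftrain_iterations=3,
    )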
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version('pytorch_lightning>=1.0.4')

MODEL_MODES = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
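A minimal, hypothetical subclass sketch of the BaseTransformer above; real task modules (NER, GLUE, seq2seq) implement the dataloaders and the full training/validation steps, so this is only an outline under those assumptions.

class ExampleTaskModule(BaseTransformer):
    def __init__(self, hparams):
        super().__init__(hparams, num_labels=2, mode="sequence-classification")

    def training_step(self, batch, batch_idx):
        outputs = self.model(**batch)  # loss comes first when labels are in the batch
        return {"loss": outputs[0]}

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("plug in your task's DataLoader here")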
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , a__ : int , a__ : Optional[Any]=3 , a__ : Tuple=32 , a__ : Optional[int]=3 , a__ : Optional[int]=10 , a__ : Optional[Any]=[8, 16, 32, 64] , a__ : List[str]=[1, 1, 2, 1] , a__ : Optional[Any]=True , a__ : Optional[Any]=True , a__ : Optional[int]="relu" , a__ : Any=3 , a__ : int=None , a__ : List[str]=["stage2", "stage3", "stage4"] , a__ : Optional[int]=[2, 3, 4] , a__ : Optional[Any]=1 , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(a__ )
UpperCAmelCase = out_features
UpperCAmelCase = out_indices
UpperCAmelCase = num_groups
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : Dict ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __snake_case ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
def __snake_case ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : Optional[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __snake_case ( self : Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __snake_case ( self : List[str] ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __snake_case ( self : List[str] ):
pass
def __snake_case ( self : List[Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __snake_case ( self : Any ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __snake_case ( self : Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
def __snake_case ( self : int ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __snake_case ( self : int ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __snake_case ( self : List[str] ):
pass
def __snake_case ( self : Dict ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __snake_case ( self : int ):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class BitModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def default_image_processor( self ):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class BitBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = BitModelTester(self )
def solution(n = 100 ):
    """Count the distinct terms of a**b for 2 <= a <= n and 2 <= b <= n (Project Euler 29)."""
    collect_powers = set()
    upper_limit = n + 1  # maximum limit (exclusive)
    for a in range(2 , upper_limit ):
        for b in range(2 , upper_limit ):
            current_power = a**b  # calculates the current power
            collect_powers.add(current_power )  # adds the result to the set
    return len(collect_powers )
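# For the default limit of 100 this counts 9183 distinct terms (the Project Euler 29 answer).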
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class _snake_case ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self ):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
def SCREAMING_SNAKE_CASE ( self ):
self.resolver.convert_models(["heb-eng"] )
@slow
def SCREAMING_SNAKE_CASE ( self ):
        content , mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
import math
class Graph :
    def __init__(self , n=0 ):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0 , n ):
            self.dp[i][i] = 0  # a node is at distance 0 from itself
    def add_edge(self , u , v , w ):
        self.dp[u][v] = w
    def floyd_warshall(self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min(self , u , v ):
        return self.dp[u][v]
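# Floyd-Warshall runs in O(n^3) time and O(n^2) space; after calling floyd_warshall(),
# show_min(u, v) returns the weight of the shortest u -> v path (math.inf if unreachable).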
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def lowerCAmelCase ( self : Tuple):
torch.manual_seed(0)
__lowerCamelCase : Optional[int] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
__lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
__lowerCamelCase : List[str] = UNetaDConditionModel(
sample_size=3_2 ,layers_per_block=1 ,block_out_channels=[3_2, 6_4] ,down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] ,mid_block_type='UNetMidBlock2DSimpleCrossAttn' ,up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] ,in_channels=3 ,out_channels=6 ,cross_attention_dim=3_2 ,encoder_hid_dim=3_2 ,attention_head_dim=8 ,addition_embed_type='text' ,addition_embed_type_num_heads=2 ,cross_attention_norm='group_norm' ,resnet_time_scale_shift='scale_shift' ,act_fn='gelu' ,)
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
__lowerCamelCase : Dict = DDPMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='squaredcos_cap_v2' ,beta_start=0.0001 ,beta_end=0.02 ,thresholding=True ,dynamic_thresholding_ratio=0.95 ,sample_max_value=1.0 ,prediction_type='epsilon' ,variance_type='learned_range' ,)
torch.manual_seed(0)
__lowerCamelCase : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase ( self : Any):
torch.manual_seed(0)
__lowerCamelCase : int = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
__lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
__lowerCamelCase : Any = UNetaDConditionModel(
sample_size=3_2 ,layers_per_block=[1, 2] ,block_out_channels=[3_2, 6_4] ,down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] ,mid_block_type='UNetMidBlock2DSimpleCrossAttn' ,up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] ,in_channels=6 ,out_channels=6 ,cross_attention_dim=3_2 ,encoder_hid_dim=3_2 ,attention_head_dim=8 ,addition_embed_type='text' ,addition_embed_type_num_heads=2 ,cross_attention_norm='group_norm' ,resnet_time_scale_shift='scale_shift' ,act_fn='gelu' ,class_embed_type='timestep' ,mid_block_scale_factor=1.414 ,time_embedding_act_fn='gelu' ,time_embedding_dim=3_2 ,)
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
__lowerCamelCase : str = DDPMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='squaredcos_cap_v2' ,beta_start=0.0001 ,beta_end=0.02 ,thresholding=True ,dynamic_thresholding_ratio=0.95 ,sample_max_value=1.0 ,prediction_type='epsilon' ,variance_type='learned_range' ,)
torch.manual_seed(0)
__lowerCamelCase : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1_0_0_0 ,beta_schedule='squaredcos_cap_v2' ,beta_start=0.0001 ,beta_end=0.02 ,)
torch.manual_seed(0)
__lowerCamelCase : Any = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase ( self : str):
__lowerCamelCase : Union[str, Any] = self.get_dummy_components()
__lowerCamelCase : Tuple = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = inputs['prompt']
__lowerCamelCase : str = inputs['generator']
__lowerCamelCase : List[Any] = inputs['num_inference_steps']
__lowerCamelCase : Optional[Any] = inputs['output_type']
if "image" in inputs:
__lowerCamelCase : Dict = inputs['image']
else:
__lowerCamelCase : Optional[Any] = None
if "mask_image" in inputs:
__lowerCamelCase : Optional[int] = inputs['mask_image']
else:
__lowerCamelCase : Dict = None
if "original_image" in inputs:
__lowerCamelCase : Dict = inputs['original_image']
else:
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe.encode_prompt(SCREAMING_SNAKE_CASE__)
# inputs with prompt converted to embeddings
__lowerCamelCase : Union[str, Any] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
__lowerCamelCase : List[str] = image
if mask_image is not None:
__lowerCamelCase : List[Any] = mask_image
if original_image is not None:
__lowerCamelCase : Optional[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE__)
pipe_loaded.to(SCREAMING_SNAKE_CASE__)
pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) is None ,F"`{optional_component}` did not stay set to None after loading." ,)
__lowerCamelCase : str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = inputs['generator']
__lowerCamelCase : Any = inputs['num_inference_steps']
__lowerCamelCase : List[str] = inputs['output_type']
# inputs with prompt converted to embeddings
__lowerCamelCase : Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
__lowerCamelCase : Optional[int] = image
if mask_image is not None:
__lowerCamelCase : int = mask_image
if original_image is not None:
__lowerCamelCase : int = original_image
__lowerCamelCase : List[Any] = pipe_loaded(**SCREAMING_SNAKE_CASE__)[0]
__lowerCamelCase : Dict = np.abs(to_np(SCREAMING_SNAKE_CASE__) - to_np(SCREAMING_SNAKE_CASE__)).max()
self.assertLess(SCREAMING_SNAKE_CASE__ ,1E-4)
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : str = self.get_dummy_components()
__lowerCamelCase : Optional[int] = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = pipe(**SCREAMING_SNAKE_CASE__)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE__)
pipe_loaded.to(SCREAMING_SNAKE_CASE__)
pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
__lowerCamelCase : str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = pipe_loaded(**SCREAMING_SNAKE_CASE__)[0]
__lowerCamelCase : int = np.abs(to_np(SCREAMING_SNAKE_CASE__) - to_np(SCREAMING_SNAKE_CASE__)).max()
self.assertLess(SCREAMING_SNAKE_CASE__ ,1E-4)
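# Both round-trip tests above save the pipeline with save_pretrained, reload it with
# from_pretrained, and assert that the reloaded pipeline reproduces the original outputs
# to within an absolute tolerance of 1e-4 (AttnAddedKVProcessor is re-applied after
# loading so the attention math stays deterministic across runs).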
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """LayoutLMv2ImageProcessor"""
    tokenizer_class = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        """simple docstring"""
        # in case of overflow, map each `input_ids` sample back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "image"]
@property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
@property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
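# A minimal usage sketch for the processor above (the checkpoint name is illustrative):
#
#     from transformers import LayoutXLMProcessor
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(images=document_image, return_tensors="pt")
#
# With apply_ocr=True (the default) the image processor supplies the words and boxes,
# so the tokenizer receives them automatically and `encoding` contains input_ids, bbox,
# attention_mask and image tensors.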
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
    args = parser.parse_args()
    logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(F"""Loading text from {args.file_path}""" )
    with open(args.file_path , "r" , encoding="utf8" ) as fp:
        data = fp.readlines()
    logger.info("Start encoding" )
    logger.info(F"""{len(data )} examples to process.""" )
    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = F"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(F"""{len(data )} examples processed.""" )
    dp_file = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F"""Dump to {dp_file}""" )
    with open(dp_file , "wb" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
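# Example invocation (file paths and script name are illustrative):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text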
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError( RuntimeError ):
    """simple docstring"""

    pass
def gen( shards: List[str] ):
    '''simple docstring'''
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main() -> None:
    '''simple docstring'''
    rank = int(os.environ['RANK'] )
    world_size = int(os.environ['WORLD_SIZE'] )
    parser = ArgumentParser()
    parser.add_argument('--streaming' , type=bool )
    parser.add_argument('--local_rank' , type=int )
    parser.add_argument('--num_workers' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'shards': [F'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
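# This script reads the RANK and WORLD_SIZE environment variables that torchrun sets, so
# it is meant to be launched with torchrun, e.g. (script name illustrative):
#   torchrun --nproc_per_node=2 run_torch_distributed.py --streaming True --num_workers 2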
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def get_model_optimizer(self , resolution=32 ):
        set_seed(0 )
        model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer
    @slow
    def test_training_step_equality(self ):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1000 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model , optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model , optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1e-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1e-5 ) )
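# DDPM and DDIM share the same forward (noising) process, so with identical seeds, data
# and hyperparameters a training step under either scheduler should produce the same
# noisy images and the same noise predictions -- which is what the allclose checks assert.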
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester :
def __init__( self , lowerCAmelCase , ) -> List[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= parent
SCREAMING_SNAKE_CASE__: List[Any]= 13
SCREAMING_SNAKE_CASE__: Dict= 7
SCREAMING_SNAKE_CASE__: int= True
SCREAMING_SNAKE_CASE__: Dict= True
SCREAMING_SNAKE_CASE__: Optional[Any]= False
SCREAMING_SNAKE_CASE__: Union[str, Any]= True
SCREAMING_SNAKE_CASE__: str= 99
SCREAMING_SNAKE_CASE__: List[str]= 32
SCREAMING_SNAKE_CASE__: Union[str, Any]= 2
SCREAMING_SNAKE_CASE__: List[str]= 4
SCREAMING_SNAKE_CASE__: Union[str, Any]= 37
SCREAMING_SNAKE_CASE__: int= """gelu"""
SCREAMING_SNAKE_CASE__: Dict= 0.1
SCREAMING_SNAKE_CASE__: int= 0.1
SCREAMING_SNAKE_CASE__: Dict= 512
SCREAMING_SNAKE_CASE__: Any= 16
SCREAMING_SNAKE_CASE__: List[str]= 2
SCREAMING_SNAKE_CASE__: Tuple= 0.02
SCREAMING_SNAKE_CASE__: Tuple= 3
SCREAMING_SNAKE_CASE__: List[Any]= 4
SCREAMING_SNAKE_CASE__: List[str]= None
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: int= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__: List[str]= None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__: List[str]= random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__: Dict= None
SCREAMING_SNAKE_CASE__: Optional[Any]= None
SCREAMING_SNAKE_CASE__: Any= None
if self.use_labels:
SCREAMING_SNAKE_CASE__: List[str]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__: Optional[int]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__: List[str]= ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__: Dict= DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: Tuple= TFDistilBertModel(config=__a )
SCREAMING_SNAKE_CASE__: Tuple= {"""input_ids""": input_ids, """attention_mask""": input_mask}
SCREAMING_SNAKE_CASE__: Optional[int]= model(__a )
SCREAMING_SNAKE_CASE__: int= [input_ids, input_mask]
SCREAMING_SNAKE_CASE__: int= model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Dict= TFDistilBertForMaskedLM(config=__a )
SCREAMING_SNAKE_CASE__: Union[str, Any]= {"""input_ids""": input_ids, """attention_mask""": input_mask}
SCREAMING_SNAKE_CASE__: Union[str, Any]= model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: List[str]= TFDistilBertForQuestionAnswering(config=__a )
SCREAMING_SNAKE_CASE__: List[str]= {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
SCREAMING_SNAKE_CASE__: List[Any]= model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= self.num_labels
SCREAMING_SNAKE_CASE__: Dict= TFDistilBertForSequenceClassification(__a )
SCREAMING_SNAKE_CASE__: Tuple= {"""input_ids""": input_ids, """attention_mask""": input_mask}
SCREAMING_SNAKE_CASE__: int= model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__: int= self.num_choices
SCREAMING_SNAKE_CASE__: int= TFDistilBertForMultipleChoice(__a )
SCREAMING_SNAKE_CASE__: str= tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__: int= tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__: Union[str, Any]= {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
SCREAMING_SNAKE_CASE__: Dict= model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: Dict= self.num_labels
SCREAMING_SNAKE_CASE__: Optional[int]= TFDistilBertForTokenClassification(__a )
SCREAMING_SNAKE_CASE__: List[Any]= {"""input_ids""": input_ids, """attention_mask""": input_mask}
SCREAMING_SNAKE_CASE__: str= model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        ( config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def UpperCamelCase_ ( self ) -> List[str]:
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def UpperCamelCase_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: int= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: List[str]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Optional[int]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDistilBertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments :
    '''simple docstring'''

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )

    def __post_init__( self ):
        self.task_name = self.task_name.lower()

class Split( Enum ):
    '''simple docstring'''

    train = "train"
    dev = "dev"
    test = "test"

class GlueDataset( Dataset ):
    '''simple docstring'''

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
def __init__( self : Any , __a : GlueDataTrainingArguments , __a : PreTrainedTokenizerBase , __a : Optional[int] = None , __a : Union[str, Split] = Split.train , __a : Optional[str] = None , ) ->Optional[int]:
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , __a , )
lowerCamelCase_ : Optional[int] = args
lowerCamelCase_ : Tuple = glue_processors[args.task_name]()
lowerCamelCase_ : Optional[Any] = glue_output_modes[args.task_name]
if isinstance(__a , __a ):
try:
lowerCamelCase_ : List[Any] = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
lowerCamelCase_ : List[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
lowerCamelCase_ : Any = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase_, lowerCamelCase_ : int = label_list[2], label_list[1]
lowerCamelCase_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ : List[Any] = cached_features_file + """.lock"""
with FileLock(__a ):
if os.path.exists(__a ) and not args.overwrite_cache:
lowerCamelCase_ : str = time.time()
lowerCamelCase_ : int = torch.load(__a )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(F'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
lowerCamelCase_ : List[Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCamelCase_ : Tuple = self.processor.get_test_examples(args.data_dir )
else:
lowerCamelCase_ : List[Any] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCamelCase_ : Dict = examples[:limit_length]
lowerCamelCase_ : Union[str, Any] = glue_convert_examples_to_features(
__a , __a , max_length=args.max_seq_length , label_list=__a , output_mode=self.output_mode , )
lowerCamelCase_ : Optional[Any] = time.time()
torch.save(self.features , __a )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : Any ) ->Any:
return len(self.features )
def __getitem__( self : List[Any] , __a : Optional[int] ) ->InputFeatures:
return self.features[i]
def _lowerCAmelCase ( self : int ) ->Optional[int]:
return self.label_list
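# A minimal usage sketch (the tokenizer checkpoint and data path are illustrative):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
#     train_dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)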
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys( s_dict ):
    '''simple docstring'''
    # 1. in HF T5, block.{x}.layer.{y} corresponds to layers_{x} in the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = r""".*/layers_(\d+)"""
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(r"""layers_(\d+)""" , r"""block/\1/layer""" , new_key )
        layer_to_block_of_layer = r"""(encoder|decoder)\/"""
        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"""/mlp/""" , r"""/1/mlp/""" , new_key )
                new_key = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/1/layer_norm/""" , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(r"""/mlp/""" , r"""/2/mlp/""" , new_key )
                new_key = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/2/layer_norm/""" , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f"{key} -> {new_key}" )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""] = s_dict[
            """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""] = s_dict[
            """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace("""expert/""" , f"experts/expert_{idx}/" )] = expert_weihts[idx]
                print(f"{key} -> {key.replace('expert/' , f'experts/expert_{idx}/' )}" )
            s_dict.pop(key )
    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config( gin_file , num_experts ):
    '''simple docstring'''
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re
    with open(gin_file , """r""" ) as f:
        raw_gin = f.read()
    regex_match = re.findall(r"""(.*) = ([0-9.]*)""" , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if """.""" in value else int(value )
    activation = re.findall(r"""(.*activations) = \(\'(.*)\',\)""" , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args["""num_experts"""] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    '''simple docstring'''
    print(f"Loading flax weights from : {flax_checkpoint_path}" )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params["""target"""]
    flax_params = flatten_dict(flax_params , sep="""/""" )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep="""/""" )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
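# Example conversion command (paths are illustrative):
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint --gin_file /path/to/config.gin \
#       --pytorch_dump_folder_path ./switch-converted --num_experts 8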
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification( TaskTemplate ):
    '''simple docstring'''

    task: str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features( self , features ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
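# Usage note: align_with_features swaps the generic ClassLabel placeholder in label_schema
# for the dataset's own ClassLabel feature, so the task template carries the concrete label
# names of the dataset it is attached to.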
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader( path: str ):
    '''simple docstring'''
    with open(path , """rb""" ) as f:
        im = Image.open(f )
        return im.convert("""RGB""" )
@dataclass
class DataTrainingArguments :
    '''simple docstring'''

    dataset_name: Optional[str] = field(
        default=None , metadata={
            '''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_dir: Optional[str] = field(default=None , metadata={'''help''': '''A folder containing the training data.'''} )
    validation_dir: Optional[str] = field(default=None , metadata={'''help''': '''A folder containing the validation data.'''} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    def __post_init__( self ):
        '''simple docstring'''
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                """You must specify either a dataset name from the hub or a train and/or validation directory.""" )

@dataclass
class ModelArguments :
    '''simple docstring'''

    model_name_or_path: str = field(
        default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    model_type: Optional[str] = field(
        default=None , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    image_processor_name: str = field(default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def collate_fn( examples ):
    '''simple docstring'''
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    labels = torch.tensor([example["""labels"""] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_image_classification""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir , "**" )
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir , "**" )
        dataset = load_dataset(
            "imagefolder" , data_files=data_files , cache_dir=model_args.cache_dir , task="image-classification" , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split )
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
snake_case : Tuple = dataset["""train"""].features["""labels"""].names
snake_case : Tuple = {}, {}
for i, label in enumerate(_UpperCAmelCase ):
snake_case : int = str(_UpperCAmelCase )
snake_case : Optional[int] = label
# Load the accuracy metric from the datasets package
snake_case : Optional[int] = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
snake_case : Optional[int] = image_processor.size["""shortest_edge"""]
else:
snake_case : str = (image_processor.size["""height"""], image_processor.size["""width"""])
snake_case : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
snake_case : Tuple = Compose(
[
RandomResizedCrop(_UpperCAmelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
snake_case : Tuple = Compose(
[
Resize(_UpperCAmelCase ),
CenterCrop(_UpperCAmelCase ),
ToTensor(),
normalize,
] )
def train_transforms(__A : Any ):
snake_case : Optional[int] = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(__A : Tuple ):
snake_case : Tuple = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 36
|
'''simple docstring'''
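# Project Euler 2: sum the even-valued Fibonacci terms that do not exceed the
# limit by iterating the sequence directly (only every third term is even).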
def solution( n: int = 4000000 ) -> int:
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F"{solution() = }")
| 694
| 0
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job ):
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
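# NOTE: the GitHub API caps `per_page` at 100, so the helper below pages through
# the remaining results; an optional token raises the unauthenticated rate limit.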
def get_job_time(workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
        return job_time
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
| 358
|
'''simple docstring'''
from __future__ import annotations
import requests
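# Minimal Hacker News client built on the public Firebase REST API: fetch the
# top story ids, resolve each to its item JSON, and render a Markdown list.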
def get_hackernews_story(story_id ):
    url = F"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url ).json()
def hackernews_top_stories(max_stories = 10 ):
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories = 10 ):
    stories = hackernews_top_stories(max_stories )
    return "\n".join("* [{title}]({url})".format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 358
| 1
|
import numpy as np
from transformers import Pipeline
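# Numerically stable softmax: subtracting the per-row maximum before
# exponentiating avoids overflow without changing the normalized result.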
def softmax(outputs ):
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class PairClassificationPipeline(Pipeline ):
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 479
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs ):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith("s3://" ) is False
    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs ):
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True
    fs = fsspec.filesystem("file" )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , _A )
def UpperCamelCase__ ( _A: List[str] , _A: Tuple , _A: List[Any] , _A: Any , _A: List[Any] , _A: Optional[int] , _A: List[str] ):
'''simple docstring'''
__lowerCamelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
__lowerCamelCase = input_paths[compression_fs_class.protocol]
if input_path is None:
__lowerCamelCase = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_A )
__lowerCamelCase = fsspec.filesystem(compression_fs_class.protocol , fo=_A )
assert isinstance(_A , _A )
__lowerCamelCase = os.path.basename(_A )
__lowerCamelCase = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(_A , """r""" , encoding="""utf-8""" ) as f, open(_A , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def UpperCamelCase__ ( _A: Optional[Any] , _A: Union[str, Any] , _A: int ):
'''simple docstring'''
__lowerCamelCase = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
__lowerCamelCase = compressed_file_paths[protocol]
__lowerCamelCase = """dataset.jsonl"""
__lowerCamelCase = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
__lowerCamelCase , *__lowerCamelCase = fsspec.get_fs_token_paths(_A )
assert fs.isfile(_A )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def test_hf_filesystem(hf_token , hf_api , hf_private_dataset_repo_txt_data , text_file ):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
    assert hffs.isdir("data" )
    assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
    with open(text_file ) as f:
        assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 479
| 1
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
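# The default verbosity can be overridden via the DATASETS_VERBOSITY environment
# variable, which must be one of the keys of `log_levels` above.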
def _get_default_logging_level():
    env_level_str = os.getenv('DATASETS_VERBOSITY' , None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F'Unknown option DATASETS_VERBOSITY={env_level_str}, '
                F'has to be one of: { ", ".join(log_levels.keys()) }')
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split('.')[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)
def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)
def set_verbosity_warning():
    return set_verbosity(WARNING)
def set_verbosity_debug():
    return set_verbosity(DEBUG)
def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_propagation() -> None:
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""
    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__( self ):
        return iter(self._iterator )
    def __getattr__( self , _ ):
        """Return empty function."""
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        return self
    def __exit__( self , type_ , value , traceback ):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__( self , *args , disable=False , **kwargs ):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
| 707
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
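# Flax ports of the 2D UNet building blocks: each down/up block stacks resnets
# (optionally interleaved with cross-attention transformers) and ends with an
# optional resolution change; down blocks also return per-layer states for the
# skip connections consumed by the matching up blocks.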
class FlaxCrossAttnDownBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , deterministic=True ):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUpBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module ):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
| 410
| 0
|
'''simple docstring'''
from __future__ import annotations
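# Project Euler 38: find the largest 1-9 pandigital number formed as the
# concatenated product of an integer with (1, 2) or (1, 2, 3).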
def is_9_pandigital(candidate: int ) -> bool:
    base_str = str(candidate )
    return len(base_str ) == 9 and set(base_str ) == set('123456789' )
def solution() -> int | None:
    # 4-digit base concatenated with its double: candidate = base * 100002
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    # 3-digit base concatenated with its double and triple: candidate = base * 1002003
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13
|
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig ):
    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self , vocab_size: int = 250002 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 514 , initializer_range: float = 0.02 , pad_token_id: int = 1 , layer_norm_eps: float = 1e-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = classifier_dropout
SCREAMING_SNAKE_CASE__ = is_decoder
SCREAMING_SNAKE_CASE__ = act_dropout
| 196
| 0
|
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
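# Cosine similarity between two embedding batches: L2-normalize the rows of
# each matrix (clipping the norms away from zero) and take the matrix product.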
def jax_cosine_distance(emb_1 , emb_2 , eps=1e-12 ):
    norm_emb_1 = jnp.divide(emb_1.T , jnp.clip(jnp.linalg.norm(emb_1 , axis=1 ) , a_min=eps ) ).T
    norm_emb_2 = jnp.divide(emb_2.T , jnp.clip(jnp.linalg.norm(emb_2 , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_1 , norm_emb_2.T )
class FlaxStableDiffusionSafetyCheckerModule(nn.Module ):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config )
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype )
        self.concept_embeds = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        self.special_care_embeds = self.param(
            "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        self.concept_embeds_weights = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
        self.special_care_embeds_weights = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
    def __call__( self , clip_input ):
        pooled_output = self.vision_model(clip_input )[1]
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3 )
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3 )
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel ):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self , config: CLIPConfig , input_shape: Optional[Tuple] = None , seed: int = 0 , dtype: jnp.dtype = jnp.float32 , _do_init: bool = True , **kwargs , ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs )
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )
    def init_weights( self , rng , input_shape , params = None ) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng , input_shape )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs , clip_input )["params"]
        return random_params
    def __call__( self , clip_input , params: dict = None , ):
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )
        return self.module.apply(
            {"params": params or self.params} , jnp.array(clip_input , dtype=jnp.float32 ) , rngs={} , )
| 721
|
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False
@dataclass
class Audio:
    sampling_rate : Optional[int] = None
    mono : bool = True
    decode : bool = True
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type : str = field(default="Audio" , init=False , repr=False )
    def __call__( self ):
        return self.pa_type
    def encode_example( self , value ) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
        if isinstance(value , str ):
            return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value["array"] , value["sampling_rate"] , format="wav" )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm" ):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate" ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
                if value.get("bytes" ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"] , dtype=np.int16 ).astype(np.float32 ) / 32767
                else:
                    bytes_value = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.float32 ) / 32767
                buffer = BytesIO(bytes() )
                sf.write(buffer , bytes_value , value["sampling_rate"] , format="wav" )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                F"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
    def decode_example( self , value , token_per_repo_id = None ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
        path , file = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(F"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
        audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::" )[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path , "rb" , use_auth_token=use_auth_token ) as f:
                array , sampling_rate = sf.read(f )
        else:
            array , sampling_rate = sf.read(file )
        array = array.T
        if self.mono:
            array = librosa.to_mono(array )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCAmelCase ( self )-> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
    def cast_storage( self , storage ) -> pa.StructArray:
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                bytes_array = storage.field("bytes" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                path_array = storage.field("path" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage ) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , "rb" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
| 115
| 0
|
"""simple docstring"""
def max_product_subarray(numbers ):
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("numbers must be an iterable of integers" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now , max_till_now = max_till_now, min_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
| 341
|
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize , sigma , theta , lambd , gamma , psi ):
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
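# A single kernel responds to edges at orientation `theta`; summing the filter
# responses over several orientations (as done below) approximates an
# orientation-invariant edge map.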
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 341
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize_and_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "crop_pct" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 30} )
        self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 107
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module ):
    def __init__( self , args ):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
    def forward( self , input_modal ):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset ):
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
    def __len__( self ):
        return len(self.data )
    def __getitem__( self , index ):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=True ) )
        start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
        image = self.transforms(image )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def get_label_frequencies( self ):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"] )
return label_freqs
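# Pads each tokenized sentence in the batch to the longest one, building the
# attention mask alongside, and stacks the image and label tensors.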
def collate_fn(batch ):
    lens = [len(row["sentence"] ) for row in batch]
    bsz , max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch] )
    tgt_tensor = torch.stack([row["label"] for row in batch] )
    img_start_token = torch.stack([row["image_start_token"] for row in batch] )
    img_end_token = torch.stack([row["image_end_token"] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
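# NOTE: the normalization statistics above appear to be dataset-specific
# (MM-IMDB) channel means/stds rather than the usual ImageNet values.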
| 107
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase_ : Any = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig ):
    model_type = "unispeech-sat"
    def __init__( self ,vocab_size=32 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout=0.1 ,activation_dropout=0.1 ,attention_dropout=0.1 ,feat_proj_dropout=0.0 ,feat_quantizer_dropout=0.0 ,final_dropout=0.1 ,layerdrop=0.1 ,initializer_range=0.02 ,layer_norm_eps=1e-5 ,feat_extract_norm="group" ,feat_extract_activation="gelu" ,conv_dim=(512, 512, 512, 512, 512, 512, 512) ,conv_stride=(5, 2, 2, 2, 2, 2, 2) ,conv_kernel=(10, 3, 3, 3, 3, 2, 2) ,conv_bias=False ,num_conv_pos_embeddings=128 ,num_conv_pos_embedding_groups=16 ,do_stable_layer_norm=False ,apply_spec_augment=True ,mask_time_prob=0.05 ,mask_time_length=10 ,mask_time_min_masks=2 ,mask_feature_prob=0.0 ,mask_feature_length=10 ,mask_feature_min_masks=0 ,num_codevectors_per_group=320 ,num_codevector_groups=2 ,contrastive_logits_temperature=0.1 ,num_negatives=100 ,codevector_dim=256 ,proj_codevector_dim=256 ,diversity_loss_weight=0.1 ,ctc_loss_reduction="mean" ,ctc_zero_infinity=False ,use_weighted_layer_sum=False ,classifier_proj_size=256 ,tdnn_dim=(512, 512, 512, 512, 1500) ,tdnn_kernel=(5, 3, 3, 1, 1) ,tdnn_dilation=(1, 2, 3, 1, 1) ,xvector_output_dim=512 ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,num_clusters=504 ,**kwargs ,):
        super().__init__(**kwargs ,pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id )
_snake_case = hidden_size
_snake_case = feat_extract_norm
_snake_case = feat_extract_activation
_snake_case = list(_SCREAMING_SNAKE_CASE )
_snake_case = list(_SCREAMING_SNAKE_CASE )
_snake_case = list(_SCREAMING_SNAKE_CASE )
_snake_case = conv_bias
_snake_case = num_conv_pos_embeddings
_snake_case = num_conv_pos_embedding_groups
_snake_case = len(self.conv_dim )
_snake_case = num_hidden_layers
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = num_attention_heads
_snake_case = hidden_dropout
_snake_case = attention_dropout
_snake_case = activation_dropout
_snake_case = feat_proj_dropout
_snake_case = final_dropout
_snake_case = layerdrop
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = vocab_size
_snake_case = num_clusters
_snake_case = do_stable_layer_norm
_snake_case = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_snake_case = apply_spec_augment
_snake_case = mask_time_prob
_snake_case = mask_time_length
_snake_case = mask_time_min_masks
_snake_case = mask_feature_prob
_snake_case = mask_feature_length
_snake_case = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_snake_case = num_codevectors_per_group
_snake_case = num_codevector_groups
_snake_case = contrastive_logits_temperature
_snake_case = feat_quantizer_dropout
_snake_case = num_negatives
_snake_case = codevector_dim
_snake_case = proj_codevector_dim
_snake_case = diversity_loss_weight
# ctc loss
_snake_case = ctc_loss_reduction
_snake_case = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_snake_case = list(_SCREAMING_SNAKE_CASE )
_snake_case = list(_SCREAMING_SNAKE_CASE )
_snake_case = list(_SCREAMING_SNAKE_CASE )
_snake_case = xvector_output_dim
@property
def _lowercase ( self ) -> List[str]:
return functools.reduce(operator.mul ,self.conv_stride ,1 )
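# Hedged usage sketch (added): with the default conv_stride (5, 2, 2, 2, 2,
# 2, 2) the product computed by `inputs_to_logits_ratio` is 320, i.e. the
# encoder emits one frame per 320 raw audio samples.
#
#   from transformers import UniSpeechSatConfig
#   config = UniSpeechSatConfig()
#   assert config.inputs_to_logits_ratio == 320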
| 185
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word and includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
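# Hedged usage sketch (added; requires downloading the "facebook/bart-base"
# checkpoint listed in the maps above):
#
#   from transformers import BartTokenizerFast
#   tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   ids = tokenizer("Hello world")["input_ids"]
#   # build_inputs_with_special_tokens wraps the text in <s> ... </s>
#   assert ids[0] == tokenizer.bos_token_id and ids[-1] == tokenizer.eos_token_id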
| 185
| 1
|
'''simple docstring'''
def interpolation_search(sorted_collection: list, item):
    """Iterative interpolation search over an ascending sorted collection."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection: list, item, left: int, right: int):
    """Recursive interpolation search; `left` and `right` bound the current window."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection: list) -> bool:
    """Raise ValueError if the collection is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
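# Hedged worked example (added for illustration): for [10, 30, 40, 45, 50,
# 66, 77, 93] and item 45, the first probe is 0 + (45 - 10) * 7 // (93 - 10)
# = 2; the value there (40) is too small, so the window narrows to [3, 7]
# and the next probe hits index 3 exactly.
def _demo_interpolation_search() -> None:
    demo = [10, 30, 40, 45, 50, 66, 77, 93]
    assert interpolation_search(demo, 45) == 3
    assert interpolation_search(demo, 12) is None
    assert interpolation_search_by_recursion(demo, 45, 0, len(demo) - 1) == 3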
if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 717
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
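# Hedged usage sketch (added): outside of tests the processor is typically
# loaded from the released checkpoint; "kakaobrain/align-base" is assumed to
# be available and requires network access.
#
#   from transformers import AlignProcessor
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#   inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")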
| 564
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_lowercase : List[Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
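# Hedged illustration (added): _LazyModule defers the torch/vision imports
# declared above until an attribute is first accessed, so the two lines
# below have very different costs:
#
#   from transformers.models.owlvit import OwlViTConfig   # cheap, config only
#   from transformers.models.owlvit import OwlViTModel    # triggers the torch-backed import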
| 136
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    """Check one runtime dependency against the pinned version range."""
    require_version(deps[pkg], hint)
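# Hedged usage sketch (added): callers validate one optional dependency at a
# time like this; the hint string is illustrative, not taken from this file.
#
#   dep_version_check("numpy", hint="Try: pip install numpy")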
| 136
| 1
|
def decimal_to_fraction(decimal):
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        # Euclidean algorithm: reduce numerator/denominator by their gcd.
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction("67") = }""")
print(F"""{decimal_to_fraction("45.0") = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction("6.25") = }""")
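    # Hedged worked example (added for illustration): "6.25" has two fractional
    # digits, so the first guess is 625/100; the Euclidean loop finds gcd 25 and
    # reduces it to 25/4. The "78td" call below is expected to raise ValueError.
    assert decimal_to_fraction("6.25") == (25, 4)
    assert decimal_to_fraction(1.5) == (3, 2)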
print(F"""{decimal_to_fraction("78td") = }""")
| 705
|
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
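# Hedged worked example (added for illustration): [[2, 1], [1, 2]] has
# eigenvalues 3 and 1, so iterating from [1, 0], which is not orthogonal to
# the dominant eigenvector [1, 1] / sqrt(2), must converge to 3.
def demo_power_iteration():
    demo_matrix = np.array([[2.0, 1.0], [1.0, 2.0]])
    demo_vector = np.array([1.0, 0.0])
    eigen_value, eigen_vector = power_iteration(demo_matrix, demo_vector)
    assert abs(eigen_value - 3.0) <= 1e-6
    assert np.allclose(np.abs(eigen_vector), np.ones(2) / np.sqrt(2), atol=1e-5)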
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 367
| 0
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"abeja/gpt-neox-japanese-2.7b": 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load the vocabulary and the emoji table from files."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width ideographic space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checkuae(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
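# Hedged usage sketch (added; requires network access to fetch the
# checkpoint named in the maps above):
#
#   from transformers import GPTNeoXJapaneseTokenizer
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer.encode("こんにちは、世界。", add_special_tokens=False)
#   print(tokenizer.decode(ids))  # expected to reproduce the input text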
| 70
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (a string in the variable x) starting near `a`."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
# Find Square Root of 5
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
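    # Hedged check (added for illustration): the positive root of x**2 - 5,
    # started near 2, is sqrt(5) = 2.2360679...
    assert abs(newton_raphson("x**2 - 5", 2) - 5**0.5) < 1e-8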
| 70
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 0
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
pass
| 692
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 142
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 708
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
    data_dir = Path("data_bin" )
_snake_case = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE__ ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(SCREAMING_SNAKE_CASE__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE__ )
_snake_case = xmod.model.encoder.sentence_encoder
_snake_case = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
_snake_case = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , SCREAMING_SNAKE_CASE__ )
_snake_case = XmodForSequenceClassification(SCREAMING_SNAKE_CASE__ ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
_snake_case = xmod_sent_encoder.embed_tokens.weight
_snake_case = xmod_sent_encoder.embed_positions.weight
_snake_case = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
_snake_case = xmod_sent_encoder.layernorm_embedding.weight
_snake_case = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_snake_case = model.roberta.encoder.layer[i]
_snake_case = xmod_sent_encoder.layers[i]
# self attention
_snake_case = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
_snake_case = xmod_layer.self_attn.q_proj.weight
_snake_case = xmod_layer.self_attn.q_proj.bias
_snake_case = xmod_layer.self_attn.k_proj.weight
_snake_case = xmod_layer.self_attn.k_proj.bias
_snake_case = xmod_layer.self_attn.v_proj.weight
_snake_case = xmod_layer.self_attn.v_proj.bias
# self-attention output
_snake_case = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
_snake_case = xmod_layer.self_attn.out_proj.weight
_snake_case = xmod_layer.self_attn.out_proj.bias
_snake_case = xmod_layer.self_attn_layer_norm.weight
_snake_case = xmod_layer.self_attn_layer_norm.bias
# intermediate
_snake_case = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
_snake_case = xmod_layer.fca.weight
_snake_case = xmod_layer.fca.bias
# output
_snake_case = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
_snake_case = xmod_layer.fca.weight
_snake_case = xmod_layer.fca.bias
_snake_case = xmod_layer.final_layer_norm.weight
_snake_case = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
_snake_case = xmod_layer.adapter_layer_norm.weight
_snake_case = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
_snake_case = bert_output.adapter_modules[lang_code]
_snake_case = xmod_layer.adapter_modules[lang_code]
_snake_case = from_adapter.fca.weight
_snake_case = from_adapter.fca.bias
_snake_case = from_adapter.fca.weight
_snake_case = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
_snake_case = xmod_sent_encoder.layer_norm.weight
_snake_case = xmod_sent_encoder.layer_norm.bias
if classification_head:
_snake_case = xmod.model.classification_heads["mnli"].dense.weight
_snake_case = xmod.model.classification_heads["mnli"].dense.bias
_snake_case = xmod.model.classification_heads["mnli"].out_proj.weight
_snake_case = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
_snake_case = xmod.model.encoder.lm_head.dense.weight
_snake_case = xmod.model.encoder.lm_head.dense.bias
_snake_case = xmod.model.encoder.lm_head.layer_norm.weight
_snake_case = xmod.model.encoder.lm_head.layer_norm.bias
_snake_case = xmod.model.encoder.lm_head.weight
_snake_case = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
_snake_case = xmod.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE__ )
_snake_case = model(SCREAMING_SNAKE_CASE__ )[0]
if classification_head:
_snake_case = xmod.model.classification_heads["mnli"](xmod.extract_features(SCREAMING_SNAKE_CASE__ ) )
else:
_snake_case = xmod.model(SCREAMING_SNAKE_CASE__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
_snake_case = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
_snake_case = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__magic_name__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
__magic_name__ : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
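
# --- Usage sketch (added for illustration; not part of the original script) ---
# After the conversion above succeeds, the dump folder is a regular Transformers
# checkpoint. The folder path and language code below are placeholder
# assumptions, not values from this file.
def _load_converted_xmod_sketch(folder: str = "./converted-xmod"):
    from transformers import XmodModel  # requires the converted checkpoint on disk

    model = XmodModel.from_pretrained(folder)
    model.set_default_language("en_XX")  # X-MOD routes activations through per-language adapters
    return model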
| 368
| 0
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
def _lowercase ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = "I was born in 92000, and this is falsé."
_snake_case = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
_snake_case = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE )
_snake_case = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(_SCREAMING_SNAKE_CASE )
_snake_case = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE=15 ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
# Simple input
_snake_case = "This is a simple input"
_snake_case = ["This is a simple input 1", "This is a simple input 2"]
_snake_case = ("This is a simple input", "This is a pair")
_snake_case = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding="max_length" )
# Simple input
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding="max_length" )
# Simple input
self.assertRaises(
_SCREAMING_SNAKE_CASE ,tokenizer_r.batch_encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding="max_length" ,)
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding="max_length" )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE ,tokenizer_r.encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding="max_length" )
# Pair input
self.assertRaises(
_SCREAMING_SNAKE_CASE ,tokenizer_r.batch_encode_plus ,_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,padding="max_length" ,)
def _lowercase ( self ) -> Tuple:
pass
def _lowercase ( self ) -> List[Any]:
_snake_case = ReformerTokenizer(_SCREAMING_SNAKE_CASE ,keep_accents=_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.tokenize("This is a test" )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) ,[285, 46, 10, 170, 382] ,)
_snake_case = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_SCREAMING_SNAKE_CASE ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
_snake_case = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE ,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ,)
_snake_case = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
@cached_property
def _lowercase ( self ) -> List[str]:
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def _lowercase ( self ) -> Optional[int]:
_snake_case = "Hello World!"
_snake_case = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(_SCREAMING_SNAKE_CASE ,self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def _lowercase ( self ) -> List[Any]:
_snake_case = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(_SCREAMING_SNAKE_CASE ,self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@require_torch
@slow
def _lowercase ( self ) -> str:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case = " ".join(_SCREAMING_SNAKE_CASE )
_snake_case = self.big_tokenizer.encode_plus(_SCREAMING_SNAKE_CASE ,return_tensors="pt" )
_snake_case = self.big_tokenizer.batch_encode_plus([sequence, sequence] ,return_tensors="pt" )
_snake_case = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case = encoded_sequence["input_ids"].shape
_snake_case = ReformerModel(_SCREAMING_SNAKE_CASE )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_SCREAMING_SNAKE_CASE )
model(**_SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ) -> List[str]:
# fmt: off
_snake_case = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE ,model_name="google/reformer-crime-and-punishment" ,revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" ,padding=_SCREAMING_SNAKE_CASE ,sequences=_SCREAMING_SNAKE_CASE ,)
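
# --- Usage sketch (added for illustration; not part of the original tests) ---
# A minimal encode/decode round trip with the pretrained tokenizer exercised
# above. Kept inside a function so importing the test module stays cheap;
# calling it requires downloading the checkpoint.
def _reformer_tokenizer_roundtrip_sketch():
    from transformers import ReformerTokenizer

    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tok.encode("Hello World!")  # expected [126, 32, 262, 152, 38, 72, 287] per the slow test above
    return tok.decode(ids)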
| 185
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k: str) -> str:
    """Map a ParlAI parameter name onto the corresponding HF Blenderbot name."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd: dict) -> None:
    """Rename layernorm_embedding -> layer_norm in place (Blenderbot-3B checkpoints)."""
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str, config_json_path: str) -> None:
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF model design."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
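
# --- Sanity-check sketch (added for illustration; not part of the original script) ---
# The key-renaming step alone can be exercised without any checkpoint on disk:
def _rename_key_sketch() -> None:
    # "attention" -> "attn" -> ".self_attn", "q_lin" -> "q_proj"
    assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == (
        "encoder.layers.0.self_attn.q_proj.weight"
    )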
| 185
| 1
|
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Sum of the absolute coordinate differences between two points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Equivalent to manhattan_distance, written as a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
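
    # Illustrative checks (added; not in the original file):
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1, 4, 2], [3, 0, 1]) == 7.0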
| 706
|
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (numbers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
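    # Illustrative check (added; not in the original file): the sequence starts
    # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th ugly number is 12.
    assert [ugly_numbers(k) for k in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]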
| 141
| 0
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Numerically approximate the arc length of fnc between x_start and x_end."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x: float) -> float:
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
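
    # Sanity check (added; not in the original file): for the straight line
    # y = x on [0, 1], the exact arc length is sqrt(2) ~= 1.41421.
    assert abs(line_length(lambda x: x, 0, 1, 100) - math.sqrt(2)) < 1e-9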
| 106
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
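
# --- Usage sketch (added for illustration; not part of the original file) ---
# A concrete tool test combines the mixin with unittest and provides self.tool
# in setUp. "CalculatorTool" is a hypothetical example, not an API from this file:
#
#     @is_tool_test
#     class CalculatorToolTest(ToolTesterMixin, unittest.TestCase):
#         def setUp(self):
#             self.tool = CalculatorTool()
#             self.tool.setup()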
| 395
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Maximum sum over all non-empty subsequences (elements need not be contiguous)."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
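
    # Illustrative checks (added; not in the original file): the best subsequence
    # keeps every positive term, or the single largest element if all are negative.
    assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
    assert max_subsequence_sum([-5, -1, -3]) == -1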
| 701
|
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation (e.g. 5 -> '0b101')."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
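
    # Illustrative checks (added; not in the original file):
    assert decimal_to_binary(10) == "0b1010"
    assert decimal_to_binary(-10) == "-0b1010"
    assert decimal_to_binary(0) == "0b0"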
| 11
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase = 256
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["melgan"]
def __init__( self :List[Any] , __A :SpectrogramNotesEncoder , __A :SpectrogramContEncoder , __A :TaFilmDecoder , __A :DDPMScheduler , __A :OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
SCREAMING_SNAKE_CASE__ = math.log(1E-5 ) # Matches MelGAN training.
SCREAMING_SNAKE_CASE__ = 4.0 # Largest value for most examples
SCREAMING_SNAKE_CASE__ = 128
self.register_modules(
notes_encoder=__A , continuous_encoder=__A , decoder=__A , scheduler=__A , melgan=__A , )
def _snake_case ( self :str , __A :List[Any] , __A :Optional[int]=(-1.0, 1.0) , __A :Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output_range
if clip:
SCREAMING_SNAKE_CASE__ = torch.clip(__A , self.min_value , self.max_value )
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _snake_case ( self :Dict , __A :Tuple , __A :str=(-1.0, 1.0) , __A :List[str]=False ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input_range
SCREAMING_SNAKE_CASE__ = torch.clip(__A , __A , __A ) if clip else outputs
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _snake_case ( self :Union[str, Any] , __A :Any , __A :List[Any] , __A :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = input_tokens > 0
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.notes_encoder(
encoder_input_tokens=__A , encoder_inputs_mask=__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.continuous_encoder(
encoder_inputs=__A , encoder_inputs_mask=__A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _snake_case ( self :Any , __A :int , __A :str , __A :Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = noise_time
if not torch.is_tensor(__A ):
SCREAMING_SNAKE_CASE__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__A ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE__ = self.decoder(
encodings_and_masks=__A , decoder_input_tokens=__A , decoder_noise_time=__A )
return logits
@torch.no_grad()
def __call__( self :Dict , __A :List[List[int]] , __A :Optional[torch.Generator] = None , __A :int = 100 , __A :bool = True , __A :str = "numpy" , __A :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A :int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__A )}.''' )
SCREAMING_SNAKE_CASE__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = np.zeros([1, 0, self.n_dims] , np.floataa )
SCREAMING_SNAKE_CASE__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
for i, encoder_input_tokens in enumerate(__A ):
if i == 0:
SCREAMING_SNAKE_CASE__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
SCREAMING_SNAKE_CASE__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
SCREAMING_SNAKE_CASE__ = ones
SCREAMING_SNAKE_CASE__ = self.scale_features(
__A , output_range=[-1.0, 1.0] , clip=__A )
SCREAMING_SNAKE_CASE__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__A , continuous_mask=__A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
SCREAMING_SNAKE_CASE__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE__ = self.decode(
encodings_and_masks=__A , input_tokens=__A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(__A , __A , __A , generator=__A ).prev_sample
SCREAMING_SNAKE_CASE__ = self.scale_to_features(__A , input_range=[-1.0, 1.0] )
SCREAMING_SNAKE_CASE__ = mel[:1]
SCREAMING_SNAKE_CASE__ = mel.cpu().float().numpy()
SCREAMING_SNAKE_CASE__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A )
logger.info("""Generated segment""" , __A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
SCREAMING_SNAKE_CASE__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
SCREAMING_SNAKE_CASE__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__A )
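
# --- Standalone sketch (added for illustration; not part of the pipeline) ---
# The scale_features/scale_to_features pair above is plain min-max scaling
# between the MelGAN range [log(1e-5), 4.0] and the diffusion range [-1, 1].
# A NumPy-only version of the same round trip:
def _minmax_roundtrip_sketch():
    import math

    import numpy as np

    min_value, max_value = math.log(1e-5), 4.0
    feats = np.array([min_value, 0.0, max_value])
    zero_one = (feats - min_value) / (max_value - min_value)  # scale_features, clip omitted
    scaled = zero_one * 2.0 - 1.0  # scale to output_range (-1, 1)
    # scale_to_features: invert back to [min_value, max_value]
    restored = (scaled + 1.0) / 2.0 * (max_value - min_value) + min_value
    assert np.allclose(feats, restored)
    return scaled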
| 6
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( __lowercase ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = ConsistencyModelPipeline
SCREAMING_SNAKE_CASE__ : Any = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ : str = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
SCREAMING_SNAKE_CASE__ : Optional[int] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
return unet
@property
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[Any] = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
return unet
def snake_case_ ( self , _lowerCAmelCase=False ):
"""simple docstring"""
if class_cond:
__SCREAMING_SNAKE_CASE: List[Any] = self.dummy_cond_unet
else:
__SCREAMING_SNAKE_CASE: Tuple = self.dummy_uncond_unet
# Default to CM multistep sampler
__SCREAMING_SNAKE_CASE: Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE: List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
"""simple docstring"""
if str(_lowerCAmelCase ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE: List[str] = torch.manual_seed(_lowerCAmelCase )
else:
__SCREAMING_SNAKE_CASE: List[str] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE: Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE: Optional[Any] = ConsistencyModelPipeline(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = self.get_dummy_inputs(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = pipe(**_lowerCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE: int = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE: Optional[Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE: str = self.get_dummy_components(class_cond=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = ConsistencyModelPipeline(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Dict = self.get_dummy_inputs(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Dict = 0
__SCREAMING_SNAKE_CASE: Optional[Any] = pipe(**_lowerCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE: Optional[Any] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE: Optional[Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE: Tuple = ConsistencyModelPipeline(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[Any] = self.get_dummy_inputs(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = 1
__SCREAMING_SNAKE_CASE: Union[str, Any] = None
__SCREAMING_SNAKE_CASE: List[str] = pipe(**_lowerCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE: Tuple = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE: Optional[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE: Optional[Any] = self.get_dummy_components(class_cond=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[Any] = ConsistencyModelPipeline(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Tuple = self.get_dummy_inputs(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = 1
__SCREAMING_SNAKE_CASE: List[Any] = None
__SCREAMING_SNAKE_CASE: Tuple = 0
__SCREAMING_SNAKE_CASE: List[str] = pipe(**_lowerCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE: List[Any] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE: Any = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self , _lowerCAmelCase=0 , _lowerCAmelCase=False , _lowerCAmelCase="cpu" , _lowerCAmelCase=torch.floataa , _lowerCAmelCase=(1, 3, 64, 64) ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = torch.manual_seed(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Dict = {
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
__SCREAMING_SNAKE_CASE: Dict = self.get_fixed_latents(seed=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase , shape=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = latents
return inputs
def snake_case_ ( self , _lowerCAmelCase=0 , _lowerCAmelCase="cpu" , _lowerCAmelCase=torch.floataa , _lowerCAmelCase=(1, 3, 64, 64) ):
"""simple docstring"""
if type(_lowerCAmelCase ) == str:
__SCREAMING_SNAKE_CASE: List[Any] = torch.device(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Dict = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
return latents
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Union[str, Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE: Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE: Optional[int] = ConsistencyModelPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
pipe.to(torch_device=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = self.get_inputs()
__SCREAMING_SNAKE_CASE: List[Any] = pipe(**_lowerCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE: Dict = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE: Any = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE: str = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE: Union[str, Any] = ConsistencyModelPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
pipe.to(torch_device=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = self.get_inputs()
__SCREAMING_SNAKE_CASE: Union[str, Any] = 1
__SCREAMING_SNAKE_CASE: List[Any] = None
__SCREAMING_SNAKE_CASE: Optional[int] = pipe(**_lowerCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE: Dict = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE: Any = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Tuple = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE: Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE: List[Any] = ConsistencyModelPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
pipe.to(torch_device=_lowerCAmelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = self.get_inputs(get_fixed_latents=_lowerCAmelCase , device=_lowerCAmelCase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=_lowerCAmelCase , enable_math=_lowerCAmelCase , enable_mem_efficient=_lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Dict = pipe(**_lowerCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE: List[Any] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE: Optional[int] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE: List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE: List[Any] = ConsistencyModelPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
pipe.to(torch_device=_lowerCAmelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = self.get_inputs(get_fixed_latents=_lowerCAmelCase , device=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = 1
__SCREAMING_SNAKE_CASE: List[Any] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=_lowerCAmelCase , enable_math=_lowerCAmelCase , enable_mem_efficient=_lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Optional[int] = pipe(**_lowerCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE: Tuple = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE: Tuple = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
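
# --- Usage sketch (added for illustration; not part of the original tests) ---
# The pattern the slow tests above exercise, outside unittest. UNet2DModel is
# the standard diffusers class name (the file's "UNetaDModel" appears to be a
# mangled rendering of it); running this requires downloading the checkpoint.
#
#     from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#     pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
#     image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0, output_type="np").images[0]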
| 202
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 27
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
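
# --- Usage sketch (added for illustration; not part of the original script) ---
# After conversion, the dump folder loads like any Transformers checkpoint.
# The folder path below is a placeholder assumption:
#
#     from transformers import MobileViTForImageClassification, MobileViTImageProcessor
#
#     processor = MobileViTImageProcessor.from_pretrained("./mobilevit-small-converted")
#     model = MobileViTForImageClassification.from_pretrained("./mobilevit-small-converted")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, 1000) for the classification variants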
| 27
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __UpperCamelCase :
def __init__( self : Union[str, Any] , UpperCAmelCase : Collection[float] | None = None ) -> List[Any]:
if components is None:
lowerCAmelCase :Tuple = []
lowerCAmelCase :Optional[int] = list(__lowerCamelCase )
def __len__( self : str ) -> Optional[Any]:
return len(self.__components )
def __str__( self : str ) -> Union[str, Any]:
return "(" + ",".join(map(__lowerCamelCase , self.__components ) ) + ")"
def __add__( self : int , UpperCAmelCase : Vector ) -> Union[str, Any]:
lowerCAmelCase :int = len(self )
if size == len(__lowerCamelCase ):
lowerCAmelCase :Any = [self.__components[i] + other.component(__lowerCamelCase ) for i in range(__lowerCamelCase )]
return Vector(__lowerCamelCase )
else:
raise Exception('must have the same size' )
def __sub__( self : Any , UpperCAmelCase : Vector ) -> Union[str, Any]:
lowerCAmelCase :str = len(self )
if size == len(__lowerCamelCase ):
lowerCAmelCase :List[Any] = [self.__components[i] - other.component(__lowerCamelCase ) for i in range(__lowerCamelCase )]
return Vector(__lowerCamelCase )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Optional[int] , UpperCAmelCase : float ) -> List[Any]:
...
@overload
def __mul__( self : Optional[Any] , UpperCAmelCase : Vector ) -> int:
...
def __mul__( self : List[Any] , UpperCAmelCase : float | Vector ) -> Optional[Any]:
if isinstance(__lowerCamelCase , (float, int) ):
lowerCAmelCase :str = [c * other for c in self.__components]
return Vector(__lowerCamelCase )
elif isinstance(__lowerCamelCase , __lowerCamelCase ) and len(self ) == len(__lowerCamelCase ):
lowerCAmelCase :List[str] = len(self )
lowerCAmelCase :Tuple = [self.__components[i] * other.component(__lowerCamelCase ) for i in range(__lowerCamelCase )]
return sum(__lowerCamelCase )
else: # error case
raise Exception('invalid operand!' )
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
return Vector(self.__components )
def UpperCAmelCase__ ( self : Tuple , UpperCAmelCase : int ) -> Any:
if isinstance(__lowerCamelCase , __lowerCamelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCAmelCase__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : float ) -> int:
assert -len(self.__components ) <= pos < len(self.__components )
lowerCAmelCase :Tuple = value
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
lowerCAmelCase :Optional[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(__lowerCamelCase ) )
def UpperCAmelCase__ ( self : Optional[int] , UpperCAmelCase : Vector , UpperCAmelCase : bool = False ) -> Dict:
lowerCAmelCase :int = self * other
lowerCAmelCase :List[str] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
return Vector([0] * dimension )
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (isinstance(lowerCAmelCase_ , lowerCAmelCase_ ))
lowerCAmelCase :Dict = [0] * dimension
lowerCAmelCase :Any = 1
return Vector(lowerCAmelCase_ )
def UpperCAmelCase ( a__ , a__ , a__ ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (isinstance(lowerCAmelCase_ , (int, float) ))
)
return x * scalar + y
def UpperCAmelCase ( a__ , a__ , a__ ):
'''simple docstring'''
random.seed(lowerCAmelCase_ )
lowerCAmelCase :str = [random.randint(lowerCAmelCase_ , lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ )]
return Vector(lowerCAmelCase_ )
class __UpperCamelCase :
def __init__( self : int , UpperCAmelCase : list[list[float]] , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
lowerCAmelCase :Dict = matrix
lowerCAmelCase :Optional[int] = w
lowerCAmelCase :List[str] = h
def __str__( self : Dict ) -> str:
lowerCAmelCase :int = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : str , UpperCAmelCase : Matrix ) -> int:
if self.__width == other.width() and self.__height == other.height():
lowerCAmelCase :Union[str, Any] = []
for i in range(self.__height ):
lowerCAmelCase :List[Any] = [
self.__matrix[i][j] + other.component(__lowerCamelCase , __lowerCamelCase )
for j in range(self.__width )
]
matrix.append(__lowerCamelCase )
return Matrix(__lowerCamelCase , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self : Optional[int] , UpperCAmelCase : Matrix ) -> Any:
if self.__width == other.width() and self.__height == other.height():
lowerCAmelCase :int = []
for i in range(self.__height ):
lowerCAmelCase :List[str] = [
self.__matrix[i][j] - other.component(__lowerCamelCase , __lowerCamelCase )
for j in range(self.__width )
]
matrix.append(__lowerCamelCase )
return Matrix(__lowerCamelCase , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : List[Any] , UpperCAmelCase : float ) -> int:
...
@overload
def __mul__( self : Optional[Any] , UpperCAmelCase : Vector ) -> Union[str, Any]:
...
def __mul__( self : Tuple , UpperCAmelCase : float | Vector ) -> Optional[int]:
if isinstance(__lowerCamelCase , __lowerCamelCase ): # matrix-vector
if len(__lowerCamelCase ) == self.__width:
lowerCAmelCase :int = zero_vector(self.__height )
for i in range(self.__height ):
lowerCAmelCase :Any = [
self.__matrix[i][j] * other.component(__lowerCamelCase )
for j in range(self.__width )
]
ans.change_component(__lowerCamelCase , sum(__lowerCamelCase ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(__lowerCamelCase , (int, float) ): # matrix-scalar
lowerCAmelCase :Any = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__lowerCamelCase , self.__width , self.__height )
return None
def UpperCAmelCase__ ( self : Any ) -> Dict:
return self.__height
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
return self.__width
def UpperCAmelCase__ ( self : Any , UpperCAmelCase : int , UpperCAmelCase : int ) -> Union[str, Any]:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def UpperCAmelCase__ ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : float ) -> List[Any]:
if 0 <= x < self.__height and 0 <= y < self.__width:
lowerCAmelCase :Union[str, Any] = value
else:
raise Exception('change_component: indices out of bounds' )
    def minor(self, x: int, y: int) -> float:
        """determinant of the submatrix obtained by removing row x and column y"""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """signed minor: (-1)^(x + y) * minor(x, y)"""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Laplace expansion along the first row"""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prefixes = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prefixes)
def square_zero_matrix(n: int) -> Matrix:
    """returns an n x n matrix filled with zeros"""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """returns a (height x width) matrix with random integer components in [a, b]"""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
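# A minimal usage sketch (an illustrative addition, not part of the original
# library): exercise the Matrix class above. Expected values are worked out by
# hand; the determinant of [[1, 2], [3, 4]] is 1*4 - 2*3 = -2.
if __name__ == "__main__":
    m = Matrix([[1.0, 2.0], [3.0, 4.0]], 2, 2)
    print(m)                 # |1.0,2.0|  then  |3.0,4.0|
    print(m.determinant())   # -2.0
    print(m * 2.0)           # every component doubled
    print(m + m)             # component-wise sum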
| 553
|
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """
    Strand sort: repeatedly pull an already-ordered "strand" out of the input
    and merge it into the solution list.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
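# Worked trace (illustrative): strand_sort([4, 3, 5, 1, 2]) first extracts the
# ascending strand [4, 5], leaving [3, 1, 2]; that strand becomes the solution.
# The next passes pull out [3], then [1, 2], merging each strand in order until
# the input is empty and [1, 2, 3, 4, 5] remains.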
| 103
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of input features up to `max_length`
        or the longest sequence in the batch."""
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # keep outputs in float32: numpy pads in float64 by default
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
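# A minimal usage sketch (illustrative; `MySequenceFeatureExtractor` is a
# hypothetical subclass that sets model_input_names = ["input_values"]):
#
#   extractor = MySequenceFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = BatchFeature({"input_values": [[0.1, 0.2, 0.3], [0.4]]})
#   padded = extractor.pad(batch, padding=True, return_attention_mask=True, return_tensors="np")
#   # padded["input_values"] has shape (2, 3); the second row is padded with 0.0,
#   # and padded["attention_mask"][1] == [1, 0, 0].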
| 700
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
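# Migration sketch (illustrative; "openai/clip-vit-base-patch32" is a public
# checkpoint used here only as an example): both lines load the same weights,
# the first merely triggers the FutureWarning above.
#
#   CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
#   CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")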
| 459
| 0
|