| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _lowerCamelCase( a , a , a , a , a ):
# Load configuration defined in the metadata file
with open(a ) as metadata_file:
__a = json.load(a )
__a = LukeConfig(use_entity_aware_attention=a , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
__a = torch.load(a , map_location="cpu" )["module"]
# Load the entity vocab file
__a = load_original_entity_vocab(a )
# add an entry for [MASK2]
__a = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__a = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
__a = AddedToken("<ent>" , lstrip=a , rstrip=a )
__a = AddedToken("<ent2>" , lstrip=a , rstrip=a )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(a )
with open(os.path.join(a , "tokenizer_config.json" ) , "r" ) as f:
__a = json.load(a )
__a = "MLukeTokenizer"
with open(os.path.join(a , "tokenizer_config.json" ) , "w" ) as f:
json.dump(a , a )
with open(os.path.join(a , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(a , a )
__a = MLukeTokenizer.from_pretrained(a )
# Initialize the embeddings of the special tokens
__a = tokenizer.convert_tokens_to_ids(["@"] )[0]
__a = tokenizer.convert_tokens_to_ids(["#"] )[0]
__a = state_dict["embeddings.word_embeddings.weight"]
__a = word_emb[ent_init_index].unsqueeze(0 )
__a = word_emb[enta_init_index].unsqueeze(0 )
__a = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__a = state_dict[bias_name]
__a = decoder_bias[ent_init_index].unsqueeze(0 )
__a = decoder_bias[enta_init_index].unsqueeze(0 )
__a = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__a = F"encoder.layer.{layer_index}.attention.self."
__a = state_dict[prefix + matrix_name]
__a = state_dict[prefix + matrix_name]
__a = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__a = state_dict["entity_embeddings.entity_embeddings.weight"]
__a = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__a = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__a = state_dict["entity_predictions.bias"]
__a = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__a = torch.cat([entity_prediction_bias, entity_mask_bias] )
__a = LukeForMaskedLM(config=a ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
__a = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
__a = state_dict[key]
else:
__a = state_dict[key]
__a , __a = model.load_state_dict(a , strict=a )
if set(a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__a = MLukeTokenizer.from_pretrained(a , task="entity_classification" )
__a = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__a = (0, 9)
__a = tokenizer(a , entity_spans=[span] , return_tensors="pt" )
__a = model(**a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__a = torch.Size((1, 3_3, 7_6_8) )
__a = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__a = torch.Size((1, 1, 7_6_8) )
__a = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , a , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__a = MLukeTokenizer.from_pretrained(a )
__a = "Tokyo is the capital of <mask>."
__a = (2_4, 3_0)
__a = tokenizer(a , entity_spans=[span] , return_tensors="pt" )
__a = model(**a )
__a = encoding["input_ids"][0].tolist()
__a = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
__a = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(a )
__a = outputs.entity_logits[0][0].argmax().item()
__a = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(a ) )
model.save_pretrained(a )
def _lowerCamelCase( a ):
__a = ["[MASK]", "[PAD]", "[UNK]"]
__a = [json.loads(a ) for line in open(a )]
__a = {}
for entry in data:
__a = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__a = entity_id
break
__a = F"{language}:{entity_name}"
__a = entity_id
return new_mapping
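For reference, `load_original_entity_vocab` expects the original entity vocabulary as JSON Lines, one object per entity id. A hypothetical two-line excerpt matching what the parser above consumes (the exact file contents are an assumption, not taken from this script):

# Hypothetical entity_vocab.jsonl excerpt; each line is {"id": ..., "entities": [[name, language], ...]}.
# {"id": 0, "entities": [["[MASK]", "en"]]}                 -> stored under the bare key "[MASK]"
# {"id": 1, "entities": [["Japan", "en"], ["Japon", "fr"]]}  -> stored as "en:Japan" and "fr:Japon"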
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()

    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _lowerCamelCase( a ):
return getitem, k
def _lowerCamelCase( a , a ):
return setitem, k, v
def _lowerCamelCase( a ):
return delitem, k
def _lowerCamelCase( a , a , *a ):
try:
return fun(a , *a ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE__:List[Any] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
SCREAMING_SNAKE_CASE__:Any = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
SCREAMING_SNAKE_CASE__:int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE__:Any = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def _lowerCamelCase( a ):
__a = HashMap(initial_block_size=4 )
__a = {}
for _, (fun, *args) in enumerate(a ):
__a , __a = _run_operation(a , a , *a )
__a , __a = _run_operation(a , a , *a )
assert my_res == py_res
assert str(a ) == str(a )
assert set(a ) == set(a )
assert len(a ) == len(a )
assert set(my.items() ) == set(py.items() )
def _lowerCamelCase( ):
def is_public(a ) -> bool:
return not name.startswith("_" )
__a = {name for name in dir({} ) if is_public(a )}
__a = {name for name in dir(HashMap() ) if is_public(a )}
assert dict_public_names > hash_public_names
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
lowercase_ : str = args.pruning_method
lowercase_ : Dict = args.threshold
lowercase_ : Tuple = args.model_name_or_path.rstrip('/' )
lowercase_ : Union[str, Any] = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''' )
lowercase_ : Any = torch.load(os.path.join(__SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) )
lowercase_ : str = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowercase_ : int = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
lowercase_ : List[str] = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
lowercase_ : int = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
lowercase_ : List[str] = MagnitudeBinarizer.apply(inputs=__SCREAMING_SNAKE_CASE , threshold=__SCREAMING_SNAKE_CASE )
lowercase_ : str = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowercase_ : Tuple = name[:-6]
lowercase_ : Any = model[F'''{prefix_}mask_scores''']
lowercase_ : int = TopKBinarizer.apply(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : int = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowercase_ : List[Any] = name[:-6]
lowercase_ : Union[str, Any] = model[F'''{prefix_}mask_scores''']
lowercase_ : List[str] = ThresholdBinarizer.apply(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : int = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowercase_ : List[str] = name[:-6]
lowercase_ : Dict = model[F'''{prefix_}mask_scores''']
lowercase_ : str = -0.1, 1.1
lowercase_ : Any = torch.sigmoid(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = s * (r - l) + l
lowercase_ : Union[str, Any] = s_bar.clamp(min=0.0 , max=1.0 )
lowercase_ : List[Any] = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
lowercase_ : Dict = os.path.join(
os.path.dirname(__SCREAMING_SNAKE_CASE ) , F'''bertarized_{os.path.basename(__SCREAMING_SNAKE_CASE )}''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
shutil.copytree(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(F'''\nCreated folder {target_model_path}''' )
torch.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
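For reference, the `l0` branch above is the deterministic test-time form of the hard-concrete gate used in L0 regularization: the mask is the stretched sigmoid clamped to the unit interval. Restating the arithmetic with the same constants as in the code (an explanatory note, not part of the original script):

# mask = clamp(sigmoid(s) * (r - l) + l, 0, 1) with (l, r) = (-0.1, 1.1).
# Scores well below 0 give values near l = -0.1 and clamp to an exact 0;
# scores well above 0 give values near r = 1.1 and clamp to an exact 1,
# so the stretched interval lets the gate fully close or fully open.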
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
            "For `sigmoied_threshold`, it is the threshold tau against which the (sigmoied) scores are compared. "
            "Not needed for `l0`."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder where the pruned model will be saved (defaults to `bertarized_<model_name_or_path>`)",
    )

    args = parser.parse_args()

    main(args)
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
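A minimal usage sketch for the re-exported pipeline, assuming torch and transformers are installed so the guard above passes; the checkpoint name is an assumption (a public VQ-Diffusion checkpoint), not something this `__init__` references:

import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe("a teddy bear playing in the pool").images[0]  # the call returns generated PIL images
image.save("teddy_bear.png")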
"""simple docstring"""
from collections import defaultdict
def UpperCamelCase ( _lowerCAmelCase : str, _lowerCAmelCase : str ) -> bool:
_UpperCAmelCase : Optional[Any] = first_str.lower().strip()
_UpperCAmelCase : Any = second_str.lower().strip()
# Remove whitespace
_UpperCAmelCase : str = first_str.replace(""" """, """""" )
_UpperCAmelCase : Tuple = second_str.replace(""" """, """""" )
# Strings of different lengths are not anagrams
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
# Default values for count should be 0
_UpperCAmelCase : defaultdict[str, int] = defaultdict(_lowerCAmelCase )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(_lowerCAmelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCamelCase__ : Optional[Any] = input('''Enter the first string ''').strip()
lowerCamelCase__ : str = input('''Enter the second string ''').strip()
lowerCamelCase__ : List[str] = check_anagrams(input_a, input_b)
print(F'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
"""simple docstring"""
import random
class _UpperCAmelCase :
@staticmethod
def __snake_case ( _A ) -> tuple[list[int], list[int]]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = [ord(_A ) for i in text]
_UpperCAmelCase : str = []
_UpperCAmelCase : int = []
for i in plain:
_UpperCAmelCase : List[str] = random.randint(1 , 3_00 )
_UpperCAmelCase : Any = (i + k) * k
cipher.append(_A )
key.append(_A )
return cipher, key
@staticmethod
def __snake_case ( _A , _A ) -> str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
for i in range(len(_A ) ):
_UpperCAmelCase : List[Any] = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(_A ) )
return "".join(_A )
if __name__ == "__main__":
lowerCamelCase__ , lowerCamelCase__ : List[Any] = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
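Why the round trip is exact: encryption computes c = (p + k) * k = p*k + k**2, so decryption's (c - k**2) / k recovers p with no remainder; the int() cast only guards against float noise from the true division. A quick sanity check (values chosen here for illustration):

p, k = ord("H"), 42          # any code point and any key from randint(1, 300)
c = (p + k) * k              # encrypt: (p + k) * k = p*k + k**2
assert (c - k**2) // k == p  # decrypt recovers p exactly with integer math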
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
lowerCAmelCase__ : int = str(bin(lowerCamelCase__ ) )[2:] # remove the leading "0b"
lowerCAmelCase__ : Optional[Any] = str(bin(lowerCamelCase__ ) )[2:]
lowerCAmelCase__ : Tuple = max(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase__ ) , b_binary.zfill(lowerCamelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
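A short usage sketch (example values chosen here, not taken from the original file's doctests):

# 25 = 0b11001 and 32 = 0b100000, so the OR is 0b111001 (decimal 57)
print(binary_or(25, 32))  # -> "0b111001"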
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : int = 0
for ch in input_str:
lowerCAmelCase__ : Any = ord(A_ )
lowerCAmelCase__ : Any = pow(2 , A_ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
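Example calls (chosen here for illustration):

print(is_contains_unique_chars("abcdef"))  # True: no repeated character
print(is_contains_unique_chars("banana"))  # False: 'a' and 'n' repeat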
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A = None , __A = None , __A = None , __A = None , __A = False , __A = False , __A = None , **__A , ) -> List[str]:
lowerCAmelCase_ :List[str] = path_or_paths
lowerCAmelCase_ :int = split if split or isinstance(__A , __A ) else """train"""
lowerCAmelCase_ :Tuple = features
lowerCAmelCase_ :str = cache_dir
lowerCAmelCase_ :int = keep_in_memory
lowerCAmelCase_ :Tuple = streaming
lowerCAmelCase_ :Optional[int] = num_proc
lowerCAmelCase_ :Optional[int] = kwargs
@abstractmethod
def __lowerCAmelCase ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A = None , __A = None , __A = False , __A = False , __A = None , **__A , ) -> int:
lowerCAmelCase_ :List[Any] = features
lowerCAmelCase_ :str = cache_dir
lowerCAmelCase_ :List[str] = keep_in_memory
lowerCAmelCase_ :Union[str, Any] = streaming
lowerCAmelCase_ :List[Any] = num_proc
lowerCAmelCase_ :List[str] = kwargs
@abstractmethod
def __lowerCAmelCase ( self ) -> Union[Dataset, IterableDataset]:
pass
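A minimal sketch of what a concrete subclass looks like, assuming an in-memory list of rows and a datasets version that provides Dataset.from_list; InMemoryListReader is hypothetical, not one of the real readers shipped with the library:

class InMemoryListReader(AbstractDatasetReader):
    def __init__(self, rows: list, **kwargs):
        super().__init__(path_or_paths=None, **kwargs)
        self.rows = rows

    def read(self) -> Dataset:
        # Dataset.from_list builds an in-memory Dataset from a list of dicts
        return Dataset.from_list(self.rows)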
import itertools
import random
import unittest

import numpy as np

from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                          ^ unk: 2 + 1 = 3               unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings.
    Both the table fill and the backtrack run in O(len(x) * len(y)) time and space.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # backtrack through the table to reconstruct one longest subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
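A brief usage sketch of the preprocessing pipeline above, assuming the class is exported from transformers under the reconstructed name `CLIPImageProcessor` and using a dummy image made up here for illustration:

import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

image = Image.fromarray(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))
processor = CLIPImageProcessor()  # defaults: resize shortest edge to 224, center crop to 224x224
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop + rescale + normalize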
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , ROBERTA_START_DOCSTRING , )
class DeeRobertaModel(DeeBertModel):

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , ROBERTA_START_DOCSTRING , )
class DeeRobertaForSequenceClassification(BertPreTrainedModel):

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
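# A usage sketch (hypothetical checkpoint path; DeeRoBERTa weights come from the DeeBERT
# training scripts rather than from the hub):
# model = DeeRobertaForSequenceClassification.from_pretrained("path/to/deeroberta")
# loss, logits = model(input_ids, labels=labels)[:2]  # pass train_highway=True to train the exit ramps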
| 318
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 318
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
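# Importing from the package then resolves symbols lazily (a sketch; assumes torch is
# installed so the modeling classes are registered):
# from transformers.models.timesformer import TimesformerConfig, TimesformerModel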
| 182
|
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
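    # Worked example (a sketch): a polarizer at 60 degrees passes cos^2(60) = 25% of the light.
    assert round(malus_law(100, 60), 6) == 25.0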
| 321
| 0
|
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        snake_case = None  # placeholder removed; attributes are set explicitly below
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.')
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f' {self.sequence_state_dim} and {self.sequence_head_width}.')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.')

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'
                f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'
                f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')

        if self.dropout >= 0.4:
            raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.')

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1E-8
    inf: float = 1E5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
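# A minimal composition sketch (illustrative values, not a released checkpoint):
# config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
# assert isinstance(config.esmfold_config.trunk, TrunkConfig)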
| 371
|
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        value_to_insert = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if value_to_insert < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value_to_insert
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
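    # A quick self-check sketch: duplicates stay in stable order, e.g.
    # binary_insertion_sort([5, 2, 4, 2, 1]) -> [1, 2, 2, 4, 5]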
| 83
| 0
|
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
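    # Quick sanity check (a sketch): 2020-10-24 fell on a Saturday.
    assert get_week_day(2020, 10, 24) == "Saturday"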
| 45
|
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        XLMForMultipleChoice,
        XLMForQuestionAnswering,
        XLMForQuestionAnsweringSimple,
        XLMForSequenceClassification,
        XLMForTokenClassification,
        XLMModel,
        XLMWithLMHeadModel,
    )
    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 74
| 0
|
from collections import deque
def tarjan(g):
    """Tarjan's algorithm for the strongly connected components of a directed graph,
    given as an adjacency list."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
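    # A second, minimal check (a sketch): a single two-node cycle collapses into one component.
    assert tarjan(create_graph(2, [(0, 1), (1, 0)])) == [[1, 0]]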
| 39
|
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector,
        allocated_resources_table,
        maximum_claim_table,
    ):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        """Sum the currently allocated resources, per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Available resources = claim vector - currently allocated resources."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self):
        """Remaining need of each process = maximum claim - current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        """Map each need list back to the original index of its process."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs):
        """Run the Banker's algorithm and print a safe execution order if one exists."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's data tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
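    # A usage sketch with the test tables above: prints the resource tables, then a safe
    # execution order when one exists.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)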
| 39
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250_020 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_026, 250_001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3_034, 2, 250_004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250_001,
} , )
| 9
|
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
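# A usage sketch (assumes a hypothetical module `mymodule` that did `from os.path import join`):
# with patch_submodule(mymodule, "os.path.join", lambda *paths: "/".join(paths)):
#     assert mymodule.join("a", "b") == "a/b"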
| 9
| 1
|
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"""checkpoints/{model_name}.pth""")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=10_24, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=12_80, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[4_00, 6_50]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 2_75, 17_25, 8_50),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[4_00, 6_50], [8_00, 6_50]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert.",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repo id hosting the original checkpoints.",
    )

    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
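# Example invocation (a sketch; assumes the script is saved as convert_sam_to_hf.py, a CUDA
# device is available, and the original checkpoints on the hub are reachable):
#   python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 --pytorch_dump_folder_path ./sam-vit-base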
| 367
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """Page Replacement Algorithm, Least Recently Used (LRU) caching."""

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x):
        """Look up a key; evict the least recently used key if the store is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self):
        """Print all the elements in the store, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self):
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"""
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
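    # A sketch of the eviction policy: 2 was the least recently used key when 5 arrived.
    assert 2 not in lru_cache.key_reference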
| 188
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
def __call__(self , A , A = None , A = None , A = None , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = None , A = False , A = False , A = False , A = False , A = True , A = None , **A , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ : Optional[int] = self.image_processor(images=A , return_tensors=A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(A , A ):
lowerCamelCase_ : Tuple = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ : Dict = features['''words''']
lowerCamelCase_ : Dict = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , )
# add pixel values
lowerCamelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ : int = self.get_overflowing_images(A , encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ : Optional[int] = images
return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                F""" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}""")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
| 318
|
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        self.input_array = input_array
        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
        # The first hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer (4 nodes) feeding
        # the second hidden layer (3 nodes).
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3)
        # Random initial values for the second hidden layer (3 nodes) feeding
        # the output layer (1 node).
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values; the array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self):
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output, iterations, give_loss):
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"""Iteration {iteration} Loss: {loss}""")

    def predict(self, input_arr):
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value) -> numpy.ndarray:
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value) -> numpy.ndarray:
    '''simple docstring'''
    return value * (1 - value)


def example() -> int:
    '''simple docstring'''
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
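    # Illustrative sanity check (assumption, not in the original file):
    # sigmoid_derivative expects the already-activated value s = sigmoid(x),
    # since d(sigmoid)/dx = s * (1 - s); at x = 0 this equals 0.25.
    s = sigmoid(numpy.array([0.0]))
    assert abs(sigmoid_derivative(s)[0] - 0.25) < 1e-12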
| 318
| 1
|
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''')
    parser.add_argument('''data_file''', metavar='''data.json''', help='''Input data JSON file.''')
    parser.add_argument('''pred_file''', metavar='''pred.json''', help='''Model predictions.''')
    parser.add_argument(
        '''--out-file''', '''-o''', metavar='''eval.json''', help='''Write accuracy metrics to file (default is stdout).''')
    parser.add_argument(
        '''--na-prob-file''', '''-n''', metavar='''na_prob.json''', help='''Model estimates of probability of no answer.''')
    parser.add_argument(
        '''--na-prob-thresh''', '''-t''', type=float, default=1.0, help='''Predict "" if no-answer probability exceeds this (default = 1.0).''', )
    parser.add_argument(
        '''--out-image-dir''', '''-p''', metavar='''out_images''', default=None, help='''Save precision-recall curves to directory.''')
    parser.add_argument('''--verbose''', '''-v''', action='''store_true''')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa['''answers''']['''text'''])
    return qid_to_has_ans


def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_fa(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
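# Worked example (illustrative, not part of the original script): for the gold
# answer "the cat sat" and the prediction "cat sat down", normalization drops
# the article "the"; the token overlap is {"cat", "sat"}, so
# precision = 2/3, recall = 2/2 = 1, and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8:
# assert abs(compute_fa("the cat sat", "cat sat down") - 0.8) < 1e-12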
def A__ ( __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE_ = qa['''id''']
SCREAMING_SNAKE_CASE_ = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCamelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
SCREAMING_SNAKE_CASE_ = ['''''']
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
SCREAMING_SNAKE_CASE_ = preds[qid]
# Take max over all gold answers
SCREAMING_SNAKE_CASE_ = max(compute_exact(__lowerCamelCase, __lowerCamelCase ) for a in gold_answers )
SCREAMING_SNAKE_CASE_ = max(compute_fa(__lowerCamelCase, __lowerCamelCase ) for a in gold_answers )
return exact_scores, fa_scores
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = {}
for qid, s in scores.items():
SCREAMING_SNAKE_CASE_ = na_probs[qid] > na_prob_thresh
if pred_na:
SCREAMING_SNAKE_CASE_ = float(not qid_to_has_ans[qid] )
else:
SCREAMING_SNAKE_CASE_ = s
return new_scores
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None ):
if not qid_list:
SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase )
return collections.OrderedDict(
[
('''exact''', 1_00.0 * sum(exact_scores.values() ) / total),
('''f1''', 1_00.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase )
return collections.OrderedDict(
[
('''exact''', 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
for k in new_eval:
SCREAMING_SNAKE_CASE_ = new_eval[k]
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
plt.step(__lowerCamelCase, __lowerCamelCase, color='''b''', alpha=0.2, where='''post''' )
plt.fill_between(__lowerCamelCase, __lowerCamelCase, step='''post''', alpha=0.2, color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCamelCase )
plt.savefig(__lowerCamelCase )
plt.clf()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None ):
SCREAMING_SNAKE_CASE_ = sorted(__lowerCamelCase, key=lambda __lowerCamelCase : na_probs[k] )
SCREAMING_SNAKE_CASE_ = 0.0
SCREAMING_SNAKE_CASE_ = 1.0
SCREAMING_SNAKE_CASE_ = 0.0
SCREAMING_SNAKE_CASE_ = [1.0]
SCREAMING_SNAKE_CASE_ = [0.0]
SCREAMING_SNAKE_CASE_ = 0.0
for i, qid in enumerate(__lowerCamelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
SCREAMING_SNAKE_CASE_ = true_pos / float(i + 1 )
SCREAMING_SNAKE_CASE_ = true_pos / float(__lowerCamelCase )
if i == len(__lowerCamelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCamelCase )
recalls.append(__lowerCamelCase )
if out_image:
plot_pr_curve(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
return {"ap": 1_00.0 * avg_prec}
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if out_image_dir and not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
SCREAMING_SNAKE_CASE_ = make_precision_recall_eval(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, out_image=os.path.join(__lowerCamelCase, '''pr_exact.png''' ), title='''Precision-Recall curve for Exact Match score''', )
SCREAMING_SNAKE_CASE_ = make_precision_recall_eval(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, out_image=os.path.join(__lowerCamelCase, '''pr_f1.png''' ), title='''Precision-Recall curve for F1 score''', )
SCREAMING_SNAKE_CASE_ = {k: float(__lowerCamelCase ) for k, v in qid_to_has_ans.items()}
SCREAMING_SNAKE_CASE_ = make_precision_recall_eval(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, out_image=os.path.join(__lowerCamelCase, '''pr_oracle.png''' ), title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''', )
merge_eval(__lowerCamelCase, __lowerCamelCase, '''pr_exact''' )
merge_eval(__lowerCamelCase, __lowerCamelCase, '''pr_f1''' )
merge_eval(__lowerCamelCase, __lowerCamelCase, '''pr_oracle''' )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if not qid_list:
return
SCREAMING_SNAKE_CASE_ = [na_probs[k] for k in qid_list]
SCREAMING_SNAKE_CASE_ = np.ones_like(__lowerCamelCase ) / float(len(__lowerCamelCase ) )
plt.hist(__lowerCamelCase, weights=__lowerCamelCase, bins=20, range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCamelCase, F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
SCREAMING_SNAKE_CASE_ = num_no_ans
SCREAMING_SNAKE_CASE_ = cur_score
SCREAMING_SNAKE_CASE_ = 0.0
SCREAMING_SNAKE_CASE_ = sorted(__lowerCamelCase, key=lambda __lowerCamelCase : na_probs[k] )
for i, qid in enumerate(__lowerCamelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
SCREAMING_SNAKE_CASE_ = scores[qid]
else:
if preds[qid]:
SCREAMING_SNAKE_CASE_ = -1
else:
SCREAMING_SNAKE_CASE_ = 0
cur_score += diff
if cur_score > best_score:
SCREAMING_SNAKE_CASE_ = cur_score
SCREAMING_SNAKE_CASE_ = na_probs[qid]
return 1_00.0 * best_score / len(__lowerCamelCase ), best_thresh
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = find_best_thresh(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = find_best_thresh(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = best_exact
SCREAMING_SNAKE_CASE_ = exact_thresh
SCREAMING_SNAKE_CASE_ = best_fa
SCREAMING_SNAKE_CASE_ = fa_thresh
def main():
with open(OPTS.data_file ) as f:
SCREAMING_SNAKE_CASE_ = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
SCREAMING_SNAKE_CASE_ = json.load(__lowerCamelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
SCREAMING_SNAKE_CASE_ = json.load(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ = {k: 0.0 for k in preds}
SCREAMING_SNAKE_CASE_ = make_qid_to_has_ans(__lowerCamelCase ) # maps qid to True/False
SCREAMING_SNAKE_CASE_ = [k for k, v in qid_to_has_ans.items() if v]
SCREAMING_SNAKE_CASE_ = [k for k, v in qid_to_has_ans.items() if not v]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_raw_scores(__lowerCamelCase, __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = apply_no_ans_threshold(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, OPTS.na_prob_thresh )
SCREAMING_SNAKE_CASE_ = apply_no_ans_threshold(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, OPTS.na_prob_thresh )
SCREAMING_SNAKE_CASE_ = make_eval_dict(__lowerCamelCase, __lowerCamelCase )
if has_ans_qids:
SCREAMING_SNAKE_CASE_ = make_eval_dict(__lowerCamelCase, __lowerCamelCase, qid_list=__lowerCamelCase )
merge_eval(__lowerCamelCase, __lowerCamelCase, '''HasAns''' )
if no_ans_qids:
SCREAMING_SNAKE_CASE_ = make_eval_dict(__lowerCamelCase, __lowerCamelCase, qid_list=__lowerCamelCase )
merge_eval(__lowerCamelCase, __lowerCamelCase, '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, OPTS.out_image_dir )
histogram_na_prob(__lowerCamelCase, __lowerCamelCase, OPTS.out_image_dir, '''hasAns''' )
histogram_na_prob(__lowerCamelCase, __lowerCamelCase, OPTS.out_image_dir, '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file, '''w''' ) as f:
json.dump(__lowerCamelCase, __lowerCamelCase )
else:
print(json.dumps(__lowerCamelCase, indent=2 ) )
if __name__ == "__main__":
__UpperCAmelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 257
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="unispeech-sat"
def __init__( self , _A=32 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.1 , _A=0.1 , _A=0.02 , _A=1E-5 , _A="group" , _A="gelu" , _A=(512, 512, 512, 512, 512, 512, 512) , _A=(5, 2, 2, 2, 2, 2, 2) , _A=(10, 3, 3, 3, 3, 2, 2) , _A=False , _A=128 , _A=16 , _A=False , _A=True , _A=0.05 , _A=10 , _A=2 , _A=0.0 , _A=10 , _A=0 , _A=320 , _A=2 , _A=0.1 , _A=100 , _A=256 , _A=256 , _A=0.1 , _A="mean" , _A=False , _A=False , _A=256 , _A=(512, 512, 512, 512, 1500) , _A=(5, 3, 3, 1, 1) , _A=(1, 2, 3, 1, 1) , _A=512 , _A=0 , _A=1 , _A=2 , _A=504 , **_A , ) -> Tuple:
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = feat_extract_norm
SCREAMING_SNAKE_CASE_ = feat_extract_activation
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = conv_bias
SCREAMING_SNAKE_CASE_ = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_ = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_ = len(self.conv_dim )
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = feat_proj_dropout
SCREAMING_SNAKE_CASE_ = final_dropout
SCREAMING_SNAKE_CASE_ = layerdrop
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = num_clusters
SCREAMING_SNAKE_CASE_ = do_stable_layer_norm
SCREAMING_SNAKE_CASE_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ = apply_spec_augment
SCREAMING_SNAKE_CASE_ = mask_time_prob
SCREAMING_SNAKE_CASE_ = mask_time_length
SCREAMING_SNAKE_CASE_ = mask_time_min_masks
SCREAMING_SNAKE_CASE_ = mask_feature_prob
SCREAMING_SNAKE_CASE_ = mask_feature_length
SCREAMING_SNAKE_CASE_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE_ = num_codevectors_per_group
SCREAMING_SNAKE_CASE_ = num_codevector_groups
SCREAMING_SNAKE_CASE_ = contrastive_logits_temperature
SCREAMING_SNAKE_CASE_ = feat_quantizer_dropout
SCREAMING_SNAKE_CASE_ = num_negatives
SCREAMING_SNAKE_CASE_ = codevector_dim
SCREAMING_SNAKE_CASE_ = proj_codevector_dim
SCREAMING_SNAKE_CASE_ = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE_ = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = list(_A )
SCREAMING_SNAKE_CASE_ = xvector_output_dim
@property
    def inputs_to_logits_ratio(self) -> int:
        return functools.reduce(operator.mul, self.conv_stride, 1)
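# Illustrative note (assumption, not part of the original config): with the
# default conv_stride (5, 2, 2, 2, 2, 2, 2) this ratio is 5 * 2**6 = 320, i.e.
# the feature encoder emits one frame per 320 input samples, or one frame
# every 20 ms for 16 kHz audio.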
| 257
| 1
|
'''simple docstring'''
from ....utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class lowercase__ :
lowercase__ = field(
metadata={"""help""": """The output directory where the model will be written."""} , )
lowercase__ = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} , )
lowercase__ = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicitly specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicitly specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
| 83
| 0
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    '''simple docstring'''
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
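# Example usage (illustrative, not part of the original pipeline file):
# tensor = preprocess(PIL.Image.new("RGB", (37, 41)))  # all-black image
# assert tensor.shape == (1, 3, 32, 32)  # sides rounded down to multiples of 32
# assert float(tensor.min()) == -1.0     # pixel range [0, 255] mapped to [-1, 1]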
class A ( _a ):
def __init__( self : Any , lowerCAmelCase_ : VQModel , lowerCAmelCase_ : UNetaDModel , lowerCAmelCase_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> int:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self : Tuple , lowerCAmelCase_ : Union[torch.Tensor, PIL.Image.Image] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[int] = 1_00 , lowerCAmelCase_ : Optional[float] = 0.0 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
if isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_a = 1
elif isinstance(lowerCAmelCase_ , torch.Tensor ):
_a = image.shape[0]
else:
raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCAmelCase_ )}' )
if isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_a = preprocess(lowerCAmelCase_ )
_a , _a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_a = (batch_size, self.unet.config.in_channels // 2, height, width)
_a = next(self.unet.parameters() ).dtype
_a = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ )
_a = image.to(device=self.device , dtype=lowerCAmelCase_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCAmelCase_ , device=self.device )
_a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
for t in self.progress_bar(lowerCAmelCase_ ):
# concat latents and low resolution image in the channel dimension.
_a = torch.cat([latents, image] , dim=1 )
_a = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
# predict the noise residual
_a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample
# decode the image latents with the VQVAE
_a = self.vqvae.decode(lowerCAmelCase_ ).sample
_a = torch.clamp(lowerCAmelCase_ , -1.0 , 1.0 )
_a = image / 2 + 0.5
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
| 179
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 179
| 1
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]) -> np.ndarray:
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode='linear_ramp', pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    """simple docstring"""
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    """simple docstring"""
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    """simple docstring"""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    """simple docstring"""
    result = Image.new('RGB', (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0), )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    """simple docstring"""
    divisor = n % d
    return n - divisor
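# Illustrative example (assumption, not part of the original file): a 64x64
# tile with overlap_pixels=8 keeps a 48x48 opaque core (value 255) while the
# 8-pixel border ramps linearly down to 0, so neighbouring tiles blend:
# mask = make_transparency_mask((64, 64), 8)
# assert mask.shape == (64, 64) and mask[32, 32] == 255 and mask[0, 0] == 0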
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 350 , ):
"""simple docstring"""
super().__init__(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , max_noise_level=UpperCAmelCase , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
_UpperCAmelCase = add_overlap_rect(UpperCAmelCase , UpperCAmelCase , image.size )
_UpperCAmelCase = image.crop(UpperCAmelCase )
_UpperCAmelCase = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
_UpperCAmelCase = translated_slice_x - (original_image_slice / 2)
_UpperCAmelCase = max(0 , UpperCAmelCase )
_UpperCAmelCase = squeeze_tile(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = to_input.size
_UpperCAmelCase = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
_UpperCAmelCase = super(UpperCAmelCase , self ).__call__(image=UpperCAmelCase , **UpperCAmelCase ).images[0]
_UpperCAmelCase = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
_UpperCAmelCase = unsqueeze_tile(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
_UpperCAmelCase = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
_UpperCAmelCase = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCAmelCase ) , mode='L' , )
final_image.paste(
UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCAmelCase )
@torch.no_grad()
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 75 , UpperCAmelCase = 9.0 , UpperCAmelCase = 50 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 128 , UpperCAmelCase = 32 , UpperCAmelCase = 32 , ):
"""simple docstring"""
_UpperCAmelCase = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
_UpperCAmelCase = math.ceil(image.size[0] / tile_size )
_UpperCAmelCase = math.ceil(image.size[1] / tile_size )
_UpperCAmelCase = tcx * tcy
_UpperCAmelCase = 0
for y in range(UpperCAmelCase ):
for x in range(UpperCAmelCase ):
self._process_tile(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , prompt=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , noise_level=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def main():
    """simple docstring"""
    model_id = 'stabilityai/stable-diffusion-x4-upscaler'
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision='fp16', torch_dtype=torch.float16)
    pipe = pipe.to('cuda')
    image = Image.open('../../docs/source/imgs/diffusers_library.jpg')

    def callback(obj):
        print(F"""progress: {obj["progress"]:.4f}""")
        obj["image"].save('diffusers_library_progress.jpg')

    final_image = pipe(image=image, prompt='Black font, white background, vector', noise_level=40, callback=callback)
    final_image.save('diffusers_library.jpg')


if __name__ == "__main__":
    main()
| 39
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["image_processor", "tokenizer"]
UpperCamelCase__ = "Pix2StructImageProcessor"
UpperCamelCase__ = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, image_processor, tokenizer):
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
def __call__( self , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 2048 , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , **UpperCAmelCase )
else:
# add pixel_values and bbox
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , header_text=UpperCAmelCase , **UpperCAmelCase )
if text is not None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if "attention_mask" in text_encoding:
_UpperCAmelCase = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
_UpperCAmelCase = text_encoding.pop('input_ids' )
else:
_UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase )
return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
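# Minimal usage sketch (hypothetical checkpoint name, not part of this file):
# from transformers import Pix2StructProcessor
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
# inputs = processor(images=image, return_tensors="pt", max_patches=2048)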
| 39
| 1
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""",)
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowerCAmelCase = image_classifier(__SCREAMING_SNAKE_CASE,candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__SCREAMING_SNAKE_CASE ),[
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
],)
__lowerCAmelCase = image_classifier([image] * 5,candidate_labels=["""A""", """B""", """C"""],batch_size=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[
[
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
],
[
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
],
[
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
],
[
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
],
[
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
],
],)
@require_tf
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""",framework="""tf""" )
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowerCAmelCase = image_classifier(__SCREAMING_SNAKE_CASE,candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],)
__lowerCAmelCase = image_classifier([image] * 5,candidate_labels=["""A""", """B""", """C"""],batch_size=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[
[
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
],
[
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
],
[
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
],
[
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
],
[
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
{"""score""": 0.333, """label""": ANY(__SCREAMING_SNAKE_CASE )},
],
],)
@slow
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = pipeline(
task="""zero-shot-image-classification""",model="""openai/clip-vit-base-patch32""",)
# This is an image of 2 cats with remotes and no planes
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowerCAmelCase = image_classifier(__SCREAMING_SNAKE_CASE,candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],)
__lowerCAmelCase = image_classifier([image] * 5,candidate_labels=["""cat""", """plane""", """remote"""],batch_size=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5,)
@slow
@require_tf
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = pipeline(
task="""zero-shot-image-classification""",model="""openai/clip-vit-base-patch32""",framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowerCAmelCase = image_classifier(__SCREAMING_SNAKE_CASE,candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],)
__lowerCAmelCase = image_classifier([image] * 5,candidate_labels=["""cat""", """plane""", """remote"""],batch_size=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ),[
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5,)
| 46
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 46
| 1
|
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    '''simple docstring'''
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
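
# Illustrative usage (not part of the original file): each pass through the
# loop above is one Miller-Rabin witness round, so a composite number survives
# all `prec` rounds with probability at most 4**(-prec).
# assert is_prime_big(97) and not is_prime_big(91)  # 91 == 7 * 13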
if __name__ == "__main__":
a : Tuple = abs(int(input("Enter bound : ").strip()))
print("Here\'s the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 311
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    '''simple docstring'''

    def __init__(self, data):
        """simple docstring"""
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """simple docstring"""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """simple docstring"""
        padding = b'''\x80''' + b'''\x00''' * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack('''>Q''', 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """simple docstring"""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """simple docstring"""
        w = list(struct.unpack('''>16L''', block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """simple docstring"""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    '''simple docstring'''
    data = b'''Test String'''
    assert SHA1Hash(data).final_hash() == hashlib.sha1(data).hexdigest()  # noqa: S324


def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Process some strings or files''')
    parser.add_argument(
        '''--string''', dest='''input_string''', default='''Hello World!! Welcome to Cryptography''', help='''Hash the string''', )
    parser.add_argument('''--file''', dest='''input_file''', help='''Hash contents of a file''')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, '''rb''') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, '''utf-8''')
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
| 188
| 0
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89
| 0
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
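# Orientation note for the mappings built above: protein["residx_atom14_to_atom37"][i, j] is the
# atom37 index of the j-th atom14 slot of residue i, and the *_atom_exists masks flag which of
# those slots correspond to real atoms for that residue type.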
| 257
|
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
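# Caveat: on pages without an og:image meta tag, soup.find(...) returns None and the
# ["content"] lookup above raises a TypeError, so a guard on the tag would be needed there.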
| 257
| 1
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
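# Rough numeric example (values illustrative, silicon-like at T = 300 K):
#   builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.41e10)  # ~0.82 V
# i.e. V = (kT / q) * ln(donor_conc * acceptor_conc / intrinsic_conc**2).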
| 60
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __magic_name__ :
def __init__( self , _lowercase , _lowercase=2 , _lowercase=32 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=True , _lowercase=32 , _lowercase=4 , _lowercase=[0, 1, 2, 3] , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=3 , _lowercase=[1, 384, 24, 24] , _lowercase=True , _lowercase=None , )-> List[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = backbone_out_indices
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = backbone_featmap_shape
UpperCamelCase_ = scope
UpperCamelCase_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_lowercase , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Any:
UpperCamelCase_ = DPTModel(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Union[str, Any]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForDepthEstimation(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> str:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForSemanticSegmentation(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase_ :Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ :Any = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ :List[Any] = False
UpperCamelCase_ :Dict = False
UpperCamelCase_ :Tuple = False
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = DPTModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def UpperCAmelCase_ ( self )-> Any:
pass
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_lowercase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowercase )
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_lowercase )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
if model_class in get_values(_lowercase ):
continue
UpperCamelCase_ = model_class(_lowercase )
model.to(_lowercase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
UpperCamelCase_ = model(**_lowercase ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Optional[int]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = False
UpperCamelCase_ = True
if model_class in get_values(_lowercase ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase_ = model_class(_lowercase )
model.to(_lowercase )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
UpperCamelCase_ = model(**_lowercase ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(config=_lowercase )
# Skip the check for the backbone
UpperCamelCase_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase_ = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase_ ( self )-> int:
pass
@slow
def UpperCAmelCase_ ( self )-> List[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase_ = DPTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def UpperCAmelCase_ ( self )-> List[str]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = "add"
with self.assertRaises(_lowercase ):
UpperCamelCase_ = DPTForDepthEstimation(_lowercase )
def lowerCAmelCase( )-> Any:
"""simple docstring"""
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
UpperCamelCase_ = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(_lowercase )
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_lowercase , return_tensors="pt" ).to(_lowercase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_lowercase )
UpperCamelCase_ = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _lowercase )
UpperCamelCase_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _lowercase , atol=1e-4 ) )
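# Note: the @slow integration test above is skipped in normal runs; in the transformers test
# suite such tests are enabled with RUN_SLOW=1, since they download the Intel/dpt-hybrid-midas
# checkpoint from the Hub.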
| 60
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 179
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
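# Quick sketch of what the property above yields for the default task (illustrative):
# BertOnnxConfig(BertConfig()).inputs maps each of input_ids / attention_mask / token_type_ids
# to {0: "batch", 1: "sequence"}, which the ONNX exporter treats as dynamic axes.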
| 179
| 1
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # a module is a leaf when it has no submodules, or when it is a conv / batch-norm layer
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as
        input. Under the hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
}
if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
else:
for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
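# Typical invocation (the script file name is illustrative):
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./out
# Omitting --model_name converts every architecture listed in names_to_config.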
| 84
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("""If two_stage is True, with_box_refine must be True.""")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
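# A minimal sanity check of the constraint enforced in __init__ (illustrative):
# DeformableDetrConfig(two_stage=True, with_box_refine=True) builds fine, whereas
# DeformableDetrConfig(two_stage=True) raises "If two_stage is True, with_box_refine must be True."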
| 46
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = "▁"
SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
SCREAMING_SNAKE_CASE__ = {
"google/pegasus-xsum": 512,
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase="<pad>" , lowercase="</s>" , lowercase="<unk>" , lowercase="<mask_2>" , lowercase="<mask_1>" , lowercase=None , lowercase=103 , lowercase = None , **lowercase , ) -> None:
lowerCAmelCase = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'additional_special_tokens should be of type {type(lowercase )}, but is'
f' {type(lowercase )}' )
lowerCAmelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
lowerCAmelCase = additional_special_tokens_extended
else:
lowerCAmelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , pad_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCAmelCase = mask_token_sent
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
# add special tokens to encoder dict
lowerCAmelCase = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
@property
def _snake_case ( self ) -> int:
return len(self.sp_model ) + self.offset
def _snake_case ( self ) -> Dict[str, int]:
lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , lowercase ) -> List[Any]:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , lowercase ) -> List[str]:
return self.sp_model.encode(lowercase , out_type=lowercase )
def _snake_case ( self , lowercase ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCAmelCase = self.sp_model.piece_to_id(lowercase )
return sp_id + self.offset
def _snake_case ( self , lowercase ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCAmelCase = self.sp_model.IdToPiece(index - self.offset )
return token
def _snake_case ( self , lowercase ) -> Optional[int]:
lowerCAmelCase = []
lowerCAmelCase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase ) + token
lowerCAmelCase = []
else:
current_sub_tokens.append(lowercase )
out_string += self.sp_model.decode(lowercase )
return out_string.strip()
def _snake_case ( self , lowercase=False ) -> Tuple:
return 1
def _snake_case ( self , lowercase ) -> Tuple:
lowerCAmelCase = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def _snake_case ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowercase )
elif token_ids_a is None:
return self._special_token_mask(lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _snake_case ( self , lowercase , lowercase=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
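# Id layout implied by the code above: ids 0/1 are pad/eos, 2/3 the two mask tokens when
# mask_token_sent is set, ids 4..offset+1 hold the <unk_x> fillers, and every plain
# SentencePiece piece is shifted by `offset` (103 by default): id = sp_id + offset.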
| 46
| 1
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowerCAmelCase__ = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self , __a , __a = None , __a = None , __a = None , __a = True , ) -> List[Any]:
UpperCamelCase = [file for file in os.listdir(__a ) if os.path.isfile(os.path.join(__a , __a ) )]
if identifier is not None:
UpperCamelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__a , __a ):
for n_ in n_identifier:
UpperCamelCase = [file for file in files if n_ not in file]
else:
UpperCamelCase = [file for file in files if n_identifier not in file]
UpperCamelCase = ignore_files or []
ignore_files.append("__init__.py" )
UpperCamelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , __a )
if only_modules:
UpperCamelCase = file.split("." )[0]
try:
UpperCamelCase = getattr(__a , __a )
UpperCamelCase = doctest.DocTestSuite(__a )
UpperCamelCase = unittest.TextTestRunner().run(__a )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F"{module_identifier} is not a module." )
else:
UpperCamelCase = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = Path("src/transformers" )
UpperCamelCase = "modeling"
UpperCamelCase = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(__a , identifier=__a , ignore_files=__a )
def snake_case_ (self ) -> Dict:
UpperCamelCase = Path("src/transformers" )
UpperCamelCase = "tokenization"
self.analyze_directory(__a , identifier=__a )
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = Path("src/transformers" )
UpperCamelCase = "configuration"
self.analyze_directory(__a , identifier=__a )
def snake_case_ (self ) -> int:
UpperCamelCase = Path("src/transformers" )
UpperCamelCase = ["configuration", "modeling", "tokenization"]
self.analyze_directory(__a , n_identifier=__a )
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = Path("docs/source" )
UpperCamelCase = ["favicon.ico"]
self.analyze_directory(__a , ignore_files=__a , only_modules=__a )
| 244
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
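# The acceptance rule above is the Metropolis criterion: a worsening move is taken with
# probability e**(change / current_temp); e.g. change = -1 at current_temp = 100 is accepted
# with probability e**(-0.01) ~= 0.99, but far less often once the temperature has decayed.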
| 244
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 269
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
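# Quick sanity example for the merge step: compare_string("0100", "0110") returns "01_0"
# (the strings differ only at index 2), while compare_string("0000", "0110") returns False.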
| 89
| 0
|
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
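# A minimal sanity check of the incremental sieve (illustrative):
#   from itertools import islice
#   list(islice(sieve(), 5))  # -> [2, 3, 5, 7, 11]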
| 131
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : str):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = 1
__a = 3
__a = (32, 32)
__a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(__SCREAMING_SNAKE_CASE)
return image
@property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
torch.manual_seed(0)
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
torch.manual_seed(0)
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def _lowerCamelCase ( self : Any):
'''simple docstring'''
torch.manual_seed(0)
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(__SCREAMING_SNAKE_CASE)
@property
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
def extract(*__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Dict):
class _A :
def __init__( self : int):
'''simple docstring'''
__a = torch.ones([0])
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.pixel_values.to(__SCREAMING_SNAKE_CASE)
return self
return Out()
return extract
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a = self.dummy_cond_unet
__a = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
__a = self.dummy_vae
__a = self.dummy_text_encoder
__a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
# make sure here that pndm scheduler skips prk
__a = StableDiffusionPipeline(
unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
__a = sd_pipe.to(__SCREAMING_SNAKE_CASE)
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = '''A painting of a squirrel eating a burger'''
__a = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(0)
__a = sd_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''')
__a = output.images
__a = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(0)
__a = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__SCREAMING_SNAKE_CASE , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a = self.dummy_cond_unet
__a = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE)
__a = self.dummy_vae
__a = self.dummy_text_encoder
__a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
# make sure here that pndm scheduler skips prk
__a = StableDiffusionPipeline(
unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
__a = sd_pipe.to(__SCREAMING_SNAKE_CASE)
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = '''A painting of a squirrel eating a burger'''
__a = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(0)
__a = sd_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''')
__a = output.images
__a = torch.Generator(device=__SCREAMING_SNAKE_CASE).manual_seed(0)
__a = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__SCREAMING_SNAKE_CASE , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=__SCREAMING_SNAKE_CASE)
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
assert isinstance(pipe.scheduler , __SCREAMING_SNAKE_CASE)
assert pipe.safety_checker is None
__a = pipe('''example prompt''' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__a = pipe('''example prompt''' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''')
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.dummy_cond_unet
__a = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE)
__a = self.dummy_vae
__a = self.dummy_text_encoder
__a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
# put models in fp16
__a = unet.half()
__a = vae.half()
__a = bert.half()
# make sure here that pndm scheduler skips prk
__a = StableDiffusionPipeline(
unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
__a = sd_pipe.to(__SCREAMING_SNAKE_CASE)
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = '''A painting of a squirrel eating a burger'''
__a = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''').images
assert image.shape == (1, 64, 64, 3)
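# A minimal sketch of the fp16 pattern exercised above (assumes a CUDA device,
# since half-precision inference is generally unsupported on CPU):
#
#     unet = unet.half()
#     vae = vae.half()
#     text_encoder = text_encoder.half()
#     pipe = StableDiffusionPipeline(unet=unet, vae=vae, text_encoder=text_encoder, ...).to("cuda")
#
# The (1, 64, 64, 3) output shape presumably follows from the tiny test UNet's
# sample size, not from any property of full-size Stable Diffusion checkpoints.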
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__SCREAMING_SNAKE_CASE)
__a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
__a = sd_pipe.to(__SCREAMING_SNAKE_CASE)
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
__a = 4_003_660_346
__a = 7
# without safety guidance (sld_guidance_scale = 0)
__a = torch.manual_seed(__SCREAMING_SNAKE_CASE)
__a = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
# with safety guidance (strong configuration)
__a = torch.manual_seed(__SCREAMING_SNAKE_CASE)
__a = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__SCREAMING_SNAKE_CASE)
__a = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
__a = sd_pipe.to(__SCREAMING_SNAKE_CASE)
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = '''padme amidala taking a bath artwork, safe for work, no nudity'''
__a = 2_734_971_755
__a = 7
__a = torch.manual_seed(__SCREAMING_SNAKE_CASE)
__a = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
__a = torch.manual_seed(__SCREAMING_SNAKE_CASE)
__a = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''')
__a = sd_pipe.to(__SCREAMING_SNAKE_CASE)
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
__a = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
__a = 1_044_355_234
__a = 12
__a = torch.manual_seed(__SCREAMING_SNAKE_CASE)
__a = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-7
__a = torch.manual_seed(__SCREAMING_SNAKE_CASE)
__a = sd_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__a = output.images
__a = image[0, -3:, -3:, -1]
__a = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
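# A minimal sketch of the safe-latent-diffusion call pattern exercised by the
# tests above (keyword names as used in these tests; sld_guidance_scale=0
# disables safety guidance entirely):
#
#     generator = torch.manual_seed(seed)
#     output = sd_pipe(
#         [prompt],
#         generator=generator,
#         guidance_scale=guidance_scale,
#         num_inference_steps=50,
#         output_type="np",
#         sld_guidance_scale=2_000,   # strength of the safety-guidance term
#         sld_warmup_steps=7,         # steps before safety guidance kicks in
#         sld_threshold=0.025,
#         sld_momentum_scale=0.5,
#         sld_mom_beta=0.7,
#     )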
| 131
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class FNetTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''token_type_ids''']
    slow_tokenizer_class = FNetTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
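# The two helpers above produce the standard single- and pair-sequence layouts.
# Schematically (token ids are illustrative):
#
#     build_inputs_with_special_tokens(A)        -> [CLS] A [SEP]
#     build_inputs_with_special_tokens(A, B)     -> [CLS] A [SEP] B [SEP]
#     create_token_type_ids_from_sequences(A, B) -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"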
| 60
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline( DiffusionPipeline ):
    def __init__( self , vqvae: VQModel , unet: UNet2DModel , scheduler: DDIMScheduler ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 5_0 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQ-VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
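# The `eta` handling above is a general diffusers idiom: inspect the scheduler's
# step() signature and forward only the kwargs it actually accepts. A standalone
# sketch (helper name is illustrative):
#
#     import inspect
#
#     def build_extra_step_kwargs(scheduler, eta):
#         accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
#         return {"eta": eta} if accepts_eta else {}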
| 60
| 1
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download real class images from LAION via the clip-retrieval kNN service."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 )

    os.makedirs(F'''{class_data_dir}/images''' , exist_ok=True )
    if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 , )

    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images' , total=num_class_images )

    with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as f_caption, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as f_urls, open(
            F'''{class_data_dir}/images.txt''' , 'w' ) as f_images:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'] )
                if img.status_code == 200:
                    Image.open(BytesIO(img.content ) )  # raises if the payload is not a decodable image
                    with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f:
                        f.write(img.content )
                    f_caption.write(images['caption'] + '\n' )
                    f_urls.write(images['url'] + '\n' )
                    f_images.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    """Parse the retrieval CLI arguments."""
    parser = argparse.ArgumentParser('' , add_help=False )
    parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=True , type=str )
    parser.add_argument('--class_data_dir' , help='path to save images' , required=True , type=str )
    parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=int )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
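# Example invocation (hypothetical script path and prompt; requires network
# access to the LAION kNN service at knn.laion.ai):
#
#     python retrieve.py \
#         --class_prompt "a photo of a dog" \
#         --class_data_dir ./real_reg/dog \
#         --num_class_images 200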
| 352
|
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class UpperCamelCase__( unittest.TestCase ):
    def setUp( self ) -> None:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )

    @require_tpu
    def test_tpu( self ) -> None:
        # launch the shared test script on 8 TPU cores via torch_xla's xla_spawn helper
        distributed_args = f'''
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        '''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 154
| 0
|
"""simple docstring"""
def hamming_distance( string_a: str , string_b: str ) -> int:
    """Return the Hamming distance between two equal-length strings.

    >>> hamming_distance("karolin", "kathrin")
    3
    >>> hamming_distance("00000", "11111")
    5
    """
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84
|
"""simple docstring"""
def bfs( graph , source , sink , parent ) -> bool:
    """Breadth-first search for an augmenting path; records predecessors in `parent`."""
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson( graph , source , sink ) -> int:
    """Edmonds-Karp (BFS-based Ford-Fulkerson) max flow on a capacity matrix."""
    parent = [-1] * len(graph )
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float("""Inf""" )
        s = sink
        while s != source:
            # find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        # update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
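# For this classic textbook network the printed maximum flow is 23. Because
# augmenting paths are found with BFS, this is the Edmonds-Karp variant, which
# runs in O(V * E^2).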
| 84
| 1
|
import math
def perfect_square( num: int ) -> bool:
    """Check if num is a perfect square using math.sqrt.

    >>> perfect_square(16)
    True
    >>> perfect_square(10)
    False
    """
    return math.sqrt(num ) * math.sqrt(num ) == num


def perfect_square_binary_search( n: int ) -> bool:
    """Check if n is a perfect square using exact integer binary search.

    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(10)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
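# Note on the two implementations above: the sqrt-based check relies on
# floating-point arithmetic and can misclassify very large integers (floats
# carry only 53 bits of mantissa), while the binary-search variant uses exact
# integer arithmetic throughout. Illustrative (hypothetical) comparison:
#
#     big = (10**8 + 1) ** 2
#     perfect_square_binary_search(big)  # True, exact
#     perfect_square(big)                # may be wrong once sqrt() loses precision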
| 65
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , )->Any:
'''simple docstring'''
A_ : List[Any] = parent
A_ : int = batch_size
A_ : str = seq_length
A_ : int = is_training
A_ : Any = use_token_type_ids
A_ : Union[str, Any] = use_labels
A_ : Any = vocab_size
A_ : Dict = hidden_size
A_ : Dict = num_hidden_layers
A_ : int = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : Dict = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Union[str, Any] = max_position_embeddings
A_ : Optional[int] = type_vocab_size
A_ : str = type_sequence_label_size
A_ : Tuple = initializer_range
A_ : Union[str, Any] = num_labels
A_ : List[str] = num_choices
A_ : Union[str, Any] = scope
A_ : Any = self.vocab_size - 1
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Any = None
if self.use_token_type_ids:
A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : str = None
A_ : Union[str, Any] = None
A_ : Optional[int] = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
A_ : Optional[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
A_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->Tuple:
'''simple docstring'''
A_ : int = OpenAIGPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : int = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
A_ : Tuple = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
A_ : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
A_ : int = OpenAIGPTLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : Tuple = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
A_ : List[Any] = OpenAIGPTDoubleHeadsModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : str = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
A_ : Any = self.num_labels
A_ : List[Any] = OpenAIGPTForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self )->int:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Dict:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )->Optional[int]:
'''simple docstring'''
A_ : Optional[Any] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , )
A_ : List[Any] = inputs_dict['''labels''']
A_ : Any = inputs_dict['''labels''']
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , )
A_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Any = OpenAIGPTModelTester(self )
A_ : int = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , n_embd=37 )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self )->List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[int] = OpenAIGPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Optional[int] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) # the president is
A_ : Union[str, Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : Dict = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE )
self.assertListEqual(output_ids[0].tolist() , _SCREAMING_SNAKE_CASE )
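# A minimal sketch of the greedy-decoding idiom the integration test above
# exercises (model name as in the test; needs the checkpoint available locally
# or network access):
#
#     from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#
#     tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#     model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#     input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#     output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
#     print(tokenizer.decode(output_ids[0]))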
| 65
| 1
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCamelCase_ = logging.getLogger(__name__)
lowerCamelCase_ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__lowerCamelCase )} , )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """The input training data file (a text file)."""} )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
SCREAMING_SNAKE_CASE__ = field(default=__lowerCamelCase , metadata={"""help""": """Whether ot not to use whole word mask."""} )
SCREAMING_SNAKE_CASE__ = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
SCREAMING_SNAKE_CASE__ = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
SCREAMING_SNAKE_CASE__ = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=__lowerCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def get_dataset( args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ):
    '''Build the train or eval dataset described by the data arguments.'''
    def _dataset( file_path , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("""You need to set whole word masking and mlm to True for Chinese Whole Word Mask""" )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def __magic_name__ ( ):
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __a )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCamelCase__ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCamelCase__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
UpperCamelCase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
UpperCamelCase__ = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase__ = AutoModelWithLMHead.from_config(__a )
model.resize_token_embeddings(len(__a ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
UpperCamelCase__ = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCamelCase__ = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCamelCase__ = (
get_dataset(__a , tokenizer=__a , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCamelCase__ = (
get_dataset(__a , tokenizer=__a , evaluate=__a , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCamelCase__ = DataCollatorForPermutationLanguageModeling(
tokenizer=__a , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCamelCase__ = DataCollatorForWholeWordMask(
tokenizer=__a , mlm_probability=data_args.mlm_probability )
else:
UpperCamelCase__ = DataCollatorForLanguageModeling(
tokenizer=__a , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCamelCase__ = Trainer(
model=__a , args=__a , data_collator=__a , train_dataset=__a , eval_dataset=__a , prediction_loss_only=__a , )
# Training
if training_args.do_train:
UpperCamelCase__ = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=__a )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase__ = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase__ = trainer.evaluate()
UpperCamelCase__ = math.exp(eval_output["""eval_loss"""] )
UpperCamelCase__ = {"""perplexity""": perplexity}
UpperCamelCase__ = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(__a , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , __a , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(__a )
return results
def __magic_name__ ( __a : List[str] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
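# Example invocation (hypothetical paths; flag names follow the upstream
# transformers run_language_modeling example this script mirrors):
#
#     python run_language_modeling.py \
#         --model_name_or_path gpt2 \
#         --train_data_file ./data/train.txt \
#         --do_train \
#         --output_dir ./output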
| 244
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCAmelCase_ (self ):
UpperCamelCase__ = 10
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = scheduler.timesteps[0]
UpperCamelCase__ = scheduler.timesteps[1]
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ (self ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# 1. scale model input
UpperCamelCase__ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.2510 ) < 1E-3
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [1_06, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase__ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4527 ) < 1E-3
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [39, 30, 12, 1, 0]
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
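# The custom-timesteps tests above all follow the standard consistency-model
# sampling loop: scale the model input, predict, then step. Sketch (names as in
# the tests):
#
#     for t in scheduler.timesteps:
#         scaled = scheduler.scale_model_input(sample, t)
#         residual = model(scaled, t)
#         sample = scheduler.step(residual, t, sample, generator=generator).prev_sample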
| 244
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase : Union[str, Any] = MgpstrTokenizer
lowercase : Any = False
lowercase : List[Any] = {}
lowercase : Optional[int] = False
def a__ ( self :Dict ):
super().setUp()
# fmt: off
snake_case_ : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case_ : Any = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE ) ) ) )
snake_case_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + """\n""" )
def a__ ( self :Optional[Any] ,**_UpperCamelCase :Optional[int] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
    def get_input_output_texts( self ,tokenizer ):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def a__ ( self :List[str] ):
pass
def a__ ( self :List[Any] ):
snake_case_ : str = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Optional[int] = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Dict = tokenizer.encode([special_token] ,add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,1 )
snake_case_ : str = tokenizer.decode(_SCREAMING_SNAKE_CASE ,skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertTrue(special_token not in decoded )
def a__ ( self :str ):
snake_case_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : str = self.get_input_output_texts(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = tokenizer.encode(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertNotEqual(len(_SCREAMING_SNAKE_CASE ) ,0 )
snake_case_ : Tuple = tokenizer.decode(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(text_a.replace(""" """ ,"""""" ) ,_SCREAMING_SNAKE_CASE )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def a__ ( self :List[Any] ):
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def a__ ( self :Optional[int] ):
pass
| 363
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,):
snake_case_ : str = parent
snake_case_ : int = batch_size
snake_case_ : Union[str, Any] = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Union[str, Any] = use_input_mask
snake_case_ : List[str] = use_labels
snake_case_ : int = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : List[Any] = projection_dim
snake_case_ : Dict = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : str = intermediate_size
snake_case_ : int = dropout
snake_case_ : int = attention_dropout
snake_case_ : Dict = max_position_embeddings
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Dict = scope
snake_case_ : Union[str, Any] = bos_token_id
def a__ ( self :Any ):
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case_ : Union[str, Any] = None
if self.use_input_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
snake_case_ : int = input_mask.numpy()
snake_case_ , snake_case_ : Tuple = input_mask.shape
snake_case_ : Any = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCamelCase ):
snake_case_ : Optional[int] = 1
snake_case_ : List[str] = 0
snake_case_ : Tuple = self.get_config()
return config, input_ids, tf.convert_to_tensor(_UpperCamelCase )
def a__ ( self :str ):
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ):
snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase )
snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase )
snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def a__ ( self :List[str] ):
snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : str = config_and_inputs
snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else ()
lowercase : int = False
lowercase : List[Any] = False
lowercase : Dict = False
def a__ ( self :List[Any] ):
snake_case_ : List[str] = BlipTextModelTester(self )
snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 )
def a__ ( self :Union[str, Any] ):
self.config_tester.run_common_tests()
def a__ ( self :Union[str, Any] ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def a__ ( self :Tuple ):
pass
def a__ ( self :Tuple ):
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def a__ ( self :Any ):
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def a__ ( self :Tuple ):
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def a__ ( self :List[Any] ):
pass
@slow
def a__ ( self :Any ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
| 8
| 0
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Calculate the Hubble parameter H(z) from the given density parameters."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('''All input parameters must be positive''' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('''Relative densities cannot be greater than one''' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
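# The function evaluates the Friedmann relation
#     H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda)
# with Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda). In the demo the density
# parameters sum to one, so at redshift 0 the call returns the Hubble constant, 68.3.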
| 131
|
import unittest
from transformers import DonutProcessor
lowerCamelCase = '''naver-clova-ix/donut-base'''
class DonutProcessorTest(unittest.TestCase):
    def setUp( self ) -> None:
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase )

    def test_token2json( self ) -> None:
        expected_json = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        sequence = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
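# token2json inverts Donut's XML-like output format: each <s_key>value</s_key>
# pair becomes a dict entry, and repeated <s_nickname> blocks separated by
# <sep/> inside <s_nicknames> become a list of dicts, as expected_json above shows.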
| 131
| 1
|
def solution(n: int = 600851475143 ) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 366
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : Dict = parent
__UpperCamelCase : List[str] = batch_size
__UpperCamelCase : str = seq_length
__UpperCamelCase : List[Any] = is_training
__UpperCamelCase : str = use_input_mask
__UpperCamelCase : int = use_token_type_ids
__UpperCamelCase : str = use_labels
__UpperCamelCase : List[str] = vocab_size
__UpperCamelCase : List[str] = hidden_size
__UpperCamelCase : List[Any] = num_hidden_layers
__UpperCamelCase : Union[str, Any] = num_attention_heads
__UpperCamelCase : Optional[Any] = intermediate_size
__UpperCamelCase : Optional[int] = hidden_act
__UpperCamelCase : List[str] = hidden_dropout_prob
__UpperCamelCase : List[Any] = attention_probs_dropout_prob
__UpperCamelCase : List[str] = max_position_embeddings
__UpperCamelCase : Union[str, Any] = type_vocab_size
__UpperCamelCase : Optional[Any] = type_sequence_label_size
__UpperCamelCase : Union[str, Any] = initializer_range
__UpperCamelCase : Union[str, Any] = num_labels
__UpperCamelCase : Any = num_choices
__UpperCamelCase : Optional[Any] = scope
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : Tuple = None
if self.use_input_mask:
__UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : Optional[int] = None
if self.use_token_type_ids:
__UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : List[str] = None
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : int = None
if self.use_labels:
__UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = LlamaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : List[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
__UpperCamelCase : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : int = True
__UpperCamelCase : Tuple = LlamaModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Optional[int] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
__UpperCamelCase : Union[str, Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
__UpperCamelCase : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Any:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Union[str, Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : Any = True
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : List[str] = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
__UpperCamelCase : Optional[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
__UpperCamelCase : str = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__UpperCamelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__UpperCamelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCamelCase : Any = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
__UpperCamelCase : List[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
# select random slice
__UpperCamelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
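# Schematic note: the check above compares a random hidden-state slice from one
# forward pass over the full sequence against a pass that reuses past_key_values
# for the last three tokens; with a correct cache the slices agree to atol=1e-3.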
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
lowercase : List[str] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase : Dict = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase : Tuple = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : Tuple = False
lowercase : List[Any] = False
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = LlamaModelTester(self )
__UpperCamelCase : List[str] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Optional[int] = 3
__UpperCamelCase : int = input_dict["input_ids"]
__UpperCamelCase : Optional[Any] = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : List[str] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Optional[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : List[str] = 3
__UpperCamelCase : Any = "single_label_classification"
__UpperCamelCase : List[str] = input_dict["input_ids"]
__UpperCamelCase : Tuple = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : Optional[int] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Dict = 3
__UpperCamelCase : Tuple = "multi_label_classification"
__UpperCamelCase : Any = input_dict["input_ids"]
__UpperCamelCase : str = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCamelCase : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCamelCase : Optional[Any] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : int = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __lowerCamelCase ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Any = ids_tensor([1, 10] , config.vocab_size )
__UpperCamelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Union[str, Any] = LlamaModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
__UpperCamelCase : int = original_model(__UpperCamelCase ).last_hidden_state
__UpperCamelCase : List[Any] = original_model(__UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Dict = {"type": scaling_type, "factor": 10.0}
__UpperCamelCase : Optional[Any] = LlamaModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
__UpperCamelCase : Optional[int] = scaled_model(__UpperCamelCase ).last_hidden_state
__UpperCamelCase : Tuple = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
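# Schematic summary of the branch above (interpretive, not normative): "linear"
# scaling divides every position index by the factor before computing rotary
# angles, so even short sequences change, while "dynamic" NTK scaling only
# activates once the input exceeds max_position_embeddings, hence the equal
# short-input outputs in the dynamic case.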
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : Tuple = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Tuple = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
__UpperCamelCase : Tuple = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__UpperCamelCase : List[str] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Tuple = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : List[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Dict = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
__UpperCamelCase : str = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
__UpperCamelCase : int = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Any = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : Dict = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
__UpperCamelCase : Any = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
__UpperCamelCase : Any = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Union[str, Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : Optional[int] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCamelCase : Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
__UpperCamelCase : Optional[Any] = model(torch.tensor(__UpperCamelCase ) )
__UpperCamelCase : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
__UpperCamelCase : Tuple = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
__UpperCamelCase : List[str] = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
__UpperCamelCase : List[str] = "Simply put, the theory of relativity states that "
__UpperCamelCase : Optional[Any] = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
__UpperCamelCase : Dict = tokenizer.encode(__UpperCamelCase , return_tensors="pt" )
__UpperCamelCase : Optional[Any] = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__UpperCamelCase )
# greedy generation outputs
__UpperCamelCase : List[Any] = model.generate(__UpperCamelCase , max_new_tokens=64 , top_p=__UpperCamelCase , temperature=1 , do_sample=__UpperCamelCase )
__UpperCamelCase : Optional[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class snake_case_ :
__A : Union[str, Any] = OPTConfig
__A : List[str] = {}
__A : str = "gelu"
def __init__( self : Any , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=13 , lowercase_ : List[str]=7 , lowercase_ : Tuple=True , lowercase_ : Dict=False , lowercase_ : Dict=99 , lowercase_ : int=16 , lowercase_ : Any=2 , lowercase_ : int=4 , lowercase_ : Optional[int]=4 , lowercase_ : Optional[Any]="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Tuple=20 , lowercase_ : List[str]=2 , lowercase_ : Union[str, Any]=1 , lowercase_ : Optional[Any]=0 , lowercase_ : Union[str, Any]=16 , lowercase_ : Dict=16 , ) -> Any:
lowercase__ : Optional[int] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Dict = seq_length
lowercase__ : Optional[int] = is_training
lowercase__ : Union[str, Any] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : Dict = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : int = attention_probs_dropout_prob
lowercase__ : Dict = max_position_embeddings
lowercase__ : Optional[int] = eos_token_id
lowercase__ : Optional[int] = pad_token_id
lowercase__ : Dict = bos_token_id
lowercase__ : List[str] = embed_dim
lowercase__ : List[str] = word_embed_proj_dim
lowercase__ : List[str] = False
def __UpperCamelCase ( self : str ) -> List[Any]:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase__ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase__ : int = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase__ : Any = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
lowercase__ : Union[str, Any] = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
return config, inputs_dict
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Optional[Any] ) -> Tuple:
lowercase__ : Tuple = TFOPTModel(config=lowercase_ )
lowercase__ : Optional[Any] = inputs_dict["input_ids"]
lowercase__ : Optional[int] = input_ids[:1, :]
lowercase__ : int = inputs_dict["attention_mask"][:1, :]
lowercase__ : List[Any] = 1
# first forward pass
lowercase__ : Any = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
lowercase__ , lowercase__ : Tuple = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
lowercase__ : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention mask
lowercase__ : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase__ : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase__ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ )[0]
lowercase__ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase__ : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase__ : Dict = output_from_no_past[:, -3:, random_slice_idx]
lowercase__ : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1E-3 )
@require_tf
class snake_case_ ( __A ,__A ,unittest.TestCase ):
__A : Any = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__A : str = (TFOPTForCausalLM,) if is_tf_available() else ()
__A : Tuple = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
__A : Tuple = False
__A : Union[str, Any] = False
__A : int = False
__A : Dict = 10
def __UpperCamelCase ( self : Dict ) -> List[Any]:
lowercase__ : List[str] = TFOPTModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=lowercase_ )
def __UpperCamelCase ( self : Tuple ) -> Any:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase_ : Tuple , lowercase_ : str ):
if hasattr(lowercase_ , "weight" ):
return embedding_layer.weight
else:
# Build the word embedding weights if they do not exist yet,
# then retry fetching the attribute once built.
model.build()
if hasattr(lowercase_ , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowercase__ : List[str] = model_class(config=lowercase_ )
lowercase__ : List[Any] = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowercase__ : List[Any] = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
lowercase__ : Optional[Any] = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowercase__ : Tuple = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowercase__ : Dict = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
# check that weights remain the same after resizing
lowercase__ : List[Any] = True
for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(p1 - p2) ) > 0:
lowercase__ : List[Any] = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
lowercase__ : Dict = True
for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(p1 - p2) ) > 0:
lowercase__ : List[str] = False
self.assertTrue(lowercase_ )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class snake_case_ ( unittest.TestCase ):
__A : str = 99
def __UpperCamelCase ( self : str ) -> Dict:
lowercase__ : Any = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowercase__ : Optional[Any] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowercase__ : int = input_ids.shape[0]
lowercase__ : Any = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class snake_case_ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : Dict = TFOPTModel.from_pretrained("facebook/opt-350m" )
lowercase__ : Dict = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
lowercase__ : Tuple = tf.not_equal(lowercase_ , model.config.pad_token_id )
with tf.GradientTape():
lowercase__ : Union[str, Any] = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
lowercase__ : str = (1, 11, 5_12)
self.assertEqual(output.shape , lowercase_ )
lowercase__ : str = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4E-3 ) )
lowercase__ : Union[str, Any] = tf.function(lowercase_ , jit_compile=lowercase_ )
lowercase__ : List[str] = xla_generate(lowercase_ , lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4E-2 ) )
@require_tf
@slow
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
super().setUp()
lowercase__ : str = "facebook/opt-350m"
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Dict = TFOPTForCausalLM.from_pretrained(self.path_model )
lowercase__ : Any = GPTaTokenizer.from_pretrained(self.path_model )
lowercase__ : List[str] = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowercase__ : Tuple = tokenizer(lowercase_ , return_tensors="tf" , padding=lowercase_ , add_special_tokens=lowercase_ )
lowercase__ : List[str] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowercase__ : Optional[Any] = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-4 ) )
lowercase__ : Dict = tf.function(lowercase_ , jit_compile=lowercase_ )
lowercase__ : Any = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-4 ) )
@require_tf
@slow
class snake_case_ ( unittest.TestCase ):
@property
def __UpperCamelCase ( self : int ) -> List[Any]:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
lowercase__ : List[Any] = "facebook/opt-125m"
lowercase__ : Optional[Any] = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
lowercase__ : List[Any] = []
lowercase__ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ )
lowercase__ : List[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowercase__ : Tuple = tokenizer(lowercase_ , return_tensors="tf" ).input_ids
lowercase__ : Any = model.generate(lowercase_ , max_length=10 )
lowercase__ : Optional[int] = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
lowercase__ : Dict = "facebook/opt-350m"
lowercase__ : Optional[int] = GPTaTokenizer.from_pretrained(lowercase_ )
lowercase__ : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ )
lowercase__ : Tuple = "left"
# use different length sentences to test batching
lowercase__ : str = [
"Hello, my dog is a little",
"Today, I",
]
lowercase__ : List[str] = tokenizer(lowercase_ , return_tensors="tf" , padding=lowercase_ )
lowercase__ : Dict = inputs["input_ids"]
lowercase__ : int = model.generate(input_ids=lowercase_ , attention_mask=inputs["attention_mask"] )
lowercase__ : Tuple = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
lowercase__ : Dict = model.generate(input_ids=lowercase_ )
lowercase__ : Union[str, Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
lowercase__ : Optional[Any] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
lowercase__ : List[str] = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
lowercase__ : List[str] = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowercase__ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
lowercase__ : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
lowercase__ : List[Any] = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
lowercase__ : Optional[int] = "facebook/opt-350m"
lowercase__ : Union[str, Any] = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
lowercase__ : Optional[int] = []
lowercase__ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ )
lowercase__ : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowercase__ : List[str] = tokenizer(lowercase_ , return_tensors="tf" ).input_ids
lowercase__ : Optional[Any] = model.generate(lowercase_ , max_length=10 )
lowercase__ : int = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
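# Schematic reminder (variable names here are illustrative): the batching test
# above pads on the left because decoder-only models continue from the last
# position; with right padding, generation would continue from pad tokens.
# tokenizer.padding_side = "left"
# batch = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)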
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
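# Usage sketch (illustrative): existing code can keep importing the old name;
# instantiating it emits a FutureWarning and then behaves exactly like
# DeiTImageProcessor, since the shim defers everything to super().__init__.
# feature_extractor = DeiTFeatureExtractor()  # warns, then acts as a DeiTImageProcessor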
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE :Tuple = 16
SCREAMING_SNAKE_CASE :Optional[Any] = 32
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1_6 , SCREAMING_SNAKE_CASE_ = "bert-base-cased" )-> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(SCREAMING_SNAKE_CASE_ ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase_ = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=SCREAMING_SNAKE_CASE_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="max_length" , max_length=1_2_8 , return_tensors="pt" )
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
UpperCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any:
"""simple docstring"""
model.eval()
UpperCamelCase_ = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
UpperCamelCase_ , UpperCamelCase_ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE_ ) - 1:
UpperCamelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase_ = metric.compute()
return eval_metric["accuracy"]
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase_ = config["lr"]
UpperCamelCase_ = int(config["num_epochs"] )
UpperCamelCase_ = int(config["seed"] )
UpperCamelCase_ = int(config["batch_size"] )
UpperCamelCase_ = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ , UpperCamelCase_ = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase_ = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
# Instantiate optimizer
UpperCamelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase_ = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
UpperCamelCase_ = 1
UpperCamelCase_ = (len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase_ = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE_ , )
else:
UpperCamelCase_ = DummyScheduler(SCREAMING_SNAKE_CASE_ , total_num_steps=SCREAMING_SNAKE_CASE_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase_ = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCamelCase_ = 0
UpperCamelCase_ = evaluate.load("glue" , "mrpc" )
UpperCamelCase_ = num_epochs
if args.partial_train_epoch is not None:
UpperCamelCase_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCamelCase_ = args.resume_from_checkpoint.split("epoch_" )[1]
UpperCamelCase_ = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCamelCase_ = int(SCREAMING_SNAKE_CASE_ ) + 1
UpperCamelCase_ = evaluation_loop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.print("resumed checkpoint performance:" , SCREAMING_SNAKE_CASE_ )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , f"state_{starting_epoch-1}.json" ) , "r" ) as f:
UpperCamelCase_ = json.load(SCREAMING_SNAKE_CASE_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCamelCase_ = {}
for epoch in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = outputs.loss
UpperCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCamelCase_ = f"epoch_{epoch}"
UpperCamelCase_ = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = evaluation_loop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = accuracy
UpperCamelCase_ = lr_scheduler.get_lr()[0]
UpperCamelCase_ = optimizer.param_groups[0]["lr"]
UpperCamelCase_ = epoch
UpperCamelCase_ = overall_step
accelerator.print(f"epoch {epoch}:" , SCREAMING_SNAKE_CASE_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"state_{epoch}.json" ) , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase( )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=SCREAMING_SNAKE_CASE_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=SCREAMING_SNAKE_CASE_ , )
parser.add_argument(
"--output_dir" , type=SCREAMING_SNAKE_CASE_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=SCREAMING_SNAKE_CASE_ , default=2 , help="Number of train epochs." , )
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 4_2, "batch_size": 1_6}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
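# Invocation sketch (paths and values are placeholders; the flags match the
# argument parser defined above):
#   accelerate launch this_script.py --num_epochs 2 --output_dir ./checkpoints
#   accelerate launch this_script.py --resume_from_checkpoint ./checkpoints/epoch_0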
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    '''simple docstring'''
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
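# A small worked example (added sketch): for the system x + 2y = 3 and
# 2x + y = 3, determinant = 1*1 - 2*2 = -3, determinant_x = 3*1 - 3*2 = -3 and
# determinant_y = 1*3 - 2*3 = -3, so x = y = 1.
assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)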
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
UpperCamelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word) -> set:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
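# A quick illustration (added sketch): for the symbol sequence "hello",
# get_pairs returns the adjacent bigrams that the BPE merge loop below consumes.
assert get_pairs("hello") == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}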
# Speech2Text2 has no max input length
UpperCamelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict = ['input_ids', 'attention_mask']
def __init__(self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict="<s>" , __UpperCAmelCase : Tuple="<pad>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : str=None , **__UpperCAmelCase : Optional[Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(
unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , **__UpperCAmelCase , )
UpperCAmelCase__ = do_lower_case
with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase__ = json.load(__UpperCAmelCase )
UpperCAmelCase__ = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
else:
with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle:
UpperCAmelCase__ = merges_handle.read().split("\n" )[:-1]
UpperCAmelCase__ = [tuple(merge.split()[:2] ) for merge in merges]
UpperCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
UpperCAmelCase__ = {}
@property
def lowercase_ (self : List[str] ) -> int:
"""simple docstring"""
return len(self.decoder )
def lowercase_ (self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ (self : Dict , __UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
UpperCAmelCase__ = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ = bigram
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
while i < len(__UpperCAmelCase ):
try:
UpperCAmelCase__ = word.index(__UpperCAmelCase , __UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ = tuple(__UpperCAmelCase )
UpperCAmelCase__ = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
UpperCAmelCase__ = get_pairs(__UpperCAmelCase )
UpperCAmelCase__ = " ".join(__UpperCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
UpperCAmelCase__ = "\n" + BPE_TOKEN_MERGES
if word.endswith(__UpperCAmelCase ):
UpperCAmelCase__ = word.replace(__UpperCAmelCase , "" )
UpperCAmelCase__ = word.replace(" " , __UpperCAmelCase )
UpperCAmelCase__ = word
return word
def lowercase_ (self : Tuple , __UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
UpperCAmelCase__ = text.lower()
UpperCAmelCase__ = text.split()
UpperCAmelCase__ = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(" " ) ) )
return split_tokens
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : str ) -> int:
"""simple docstring"""
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase_ (self : Any , __UpperCAmelCase : int ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.decoder.get(__UpperCAmelCase , self.unk_token )
return result
def lowercase_ (self : Dict , __UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = " ".join(__UpperCAmelCase )
# make sure @@ tokens are concatenated
UpperCAmelCase__ = "".join(string.split(__UpperCAmelCase ) )
return string
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase__ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + "\n" )
UpperCAmelCase__ = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
UpperCAmelCase__ = token_index
writer.write(" ".join(__UpperCAmelCase ) + "\n" )
index += 1
return (vocab_file, merges_file)
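# Decoding sketch (added note): the string-joining method above corresponds to
# convert_tokens_to_string in the upstream tokenizer; it joins tokens with
# spaces and strips the "@@ " continuation markers, so illustratively
# ["hel@@", "lo", "world"] -> "hel@@ lo world" -> "hello world".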
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
array = [randint(-10_00, 10_00) for i in range(1_00)]
k = randint(0, 1_10)
print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
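# A deterministic check sketch alongside the random demo: with
# array = [1, 4, 2, 10, 23, 3, 1, 0, 20] and k = 4, the sliding-window sums are
# 17, 39, 38, 37, 27, 24, so the maximum is 39.
assert max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39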
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : str = '▁'
lowerCAmelCase_ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =BertGenerationTokenizer
__a =False
__a =True
def UpperCamelCase__ ( self : Optional[Any] ):
super().setUp()
_a = BertGenerationTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Tuple ):
_a = "<s>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ ( self : List[str] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__a ) , 10_02 )
def UpperCamelCase__ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCamelCase__ ( self : Tuple ):
_a = BertGenerationTokenizer(__a , keep_accents=__a )
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_85, 46, 10, 1_70, 3_82] , )
_a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_a = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_a = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def big_tokenizer( self ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def test_tokenization_base_easy_symbols( self ):
symbols = "Hello World!"
original_tokenizer_encodings = [1_85_36, 22_60, 1_01]
self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def test_tokenization_base_hard_symbols( self ):
symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
original_tokenizer_encodings = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
def test_torch_encode_plus_sent_to_model( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
sequence = " ".join(first_ten_tokens )
encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=None )
batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=None )
config = BertGenerationConfig()
model = BertGenerationEncoder(config )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**encoded_sequence )
model(**batch_encoded_sequence )
@slow
def test_tokenizer_integration( self ):
# fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 346
| 0
|
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Element-wise hyperbolic tangent activation,
    tanh(x) = (2 / (1 + e^(-2x))) - 1.
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
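    # Sanity check (illustrative): the closed form above is algebraically
    # equal to numpy's built-in tanh, so both should agree to float precision.
    sample = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))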
| 36
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Releases memory from `objects` by setting them to None and emptying the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Checks whether `exception` is one of the known out-of-memory errors."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that retries `function`, halving the batch size each time an OOM error is raised."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
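# Illustrative usage of the decorator above (`train`, `model`, and `dataloader`
# are hypothetical names, not part of this module). `batch_size` must be the
# first parameter of the decorated function and is supplied by the decorator,
# so callers invoke it as train(model, dataloader):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size, model, dataloader):
#         ...  # an OOM error here triggers a retry with batch_size halved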
| 8
| 0
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 10
|
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # the quote is rendered inside a div with this Yahoo-specific CSS class
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
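# Caveat: this scrapes live page markup. If Yahoo changes the CSS class above,
# soup.find("div", ...) returns None and the call fails with AttributeError,
# so the class string has to be updated by hand.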
| 10
| 1
|
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a ):
__lowerCAmelCase = str(id_ )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = []
__lowerCAmelCase = {} # {vertex:distance}
def __lt__( self , __a ):
return self.key < other.key
def __repr__( self ):
return self.id
def snake_case ( self , __a ):
self.neighbors.append(__a )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = weight
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = []
for u in graph:
__lowerCAmelCase = math.inf
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = graph[:]
while q:
__lowerCAmelCase = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__lowerCAmelCase = u
__lowerCAmelCase = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
for u in graph:
__lowerCAmelCase = math.inf
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
__lowerCAmelCase = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__lowerCAmelCase = u
__lowerCAmelCase = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowerCamelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
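    # Illustrative example: a weighted triangle 1-2 (w=1), 2-3 (w=2), 1-3 (w=3).
    # Prim keeps the two cheapest edges, so the MST parents come out as:
    g = [Vertex(n) for n in range(3)]
    connect(g, 1, 2, 1)
    connect(g, 2, 3, 2)
    connect(g, 1, 3, 3)
    assert prim(g, g[0]) == [(2, 1), (3, 2)]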
| 57
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source , target ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir ):
args = _TestCommandArgs(dataset=dataset_loading_script_dir , all_configs=True , save_infos=True )
test_command = TestCommand(*args )
test_command.run()
readme_path = os.path.join(dataset_loading_script_dir , """README.md""" )
assert os.path.exists(readme_path )
dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir )
expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_35_15_63,
"""num_examples""": 1_00_00,
},
{
"""name""": """validation""",
"""num_bytes""": 23_84_18,
"""num_examples""": 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
result, expected = getattr(dataset_infos["""default"""] , key ), getattr(expected_dataset_infos["""default"""] , key )
if key == "num_bytes":
assert is_1percent_close(result , expected )
elif key == "splits":
assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_1percent_close(result[split].num_bytes , expected[split].num_bytes )
else:
assert result == expected
| 171
| 0
|
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed vrms = sqrt(3*R*T/M), with T in kelvin and M in kg/mol."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: nitrogen (N2) at 300 K; note the molar mass must be in kg/mol
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
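    # Back-of-the-envelope check (illustrative): sqrt(3 * 8.3144598 * 300 / 0.028)
    # is about 516.8 m/s, consistent with tabulated values for N2 near room
    # temperature.
    assert 500 < vrms < 530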
| 355
|
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2ForCTC,
Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
_is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    attention_dropout: Optional[float] = field(
        default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
    activation_dropout: Optional[float] = field(
        default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
    hidden_dropout: Optional[float] = field(
        default=0.1 , metadata={
            'help': 'The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.'
        } , )
    feat_proj_dropout: Optional[float] = field(
        default=0.1 , metadata={'help': 'The dropout probabilitiy for all 1D convolutional layers in feature extractor.'} , )
    mask_time_prob: Optional[float] = field(
        default=0.05 , metadata={
            'help': (
                'Propability of each feature vector along the time axis to be chosen as the start of the vector'
                'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'
                'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
            )
        } , )
    layerdrop: Optional[float] = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_split_name: Optional[str] = field(
        default='train+validation' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_val_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of validation examples to this '
                'value if set.'
            )
        } , )
    chars_to_ignore: List[str] = list_field(
        default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features):
        # split inputs and labels since they have to be of different lengths
        # and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
        batch["labels"] = labels
        return batch
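    # Shape sketch (illustrative): given features like
    #   [{"input_values": [...], "labels": [...]}, ...]
    # the collator returns padded "input_values" / "attention_mask" tensors and
    # a "labels" tensor where padded positions are -100 so the CTC loss skips them.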
class CTCTrainer(Trainer):
    def training_step(self, model, inputs):
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
train_dataset = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
eval_dataset = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore )}]"
def remove_special_characters(batch ):
batch["text"] = re.sub(chars_to_ignore_regex , "" , batch["sentence"] ).lower() + " "
return batch
train_dataset = train_dataset.map(remove_special_characters , remove_columns=["sentence"] )
eval_dataset = eval_dataset.map(remove_special_characters , remove_columns=["sentence"] )
def extract_all_chars(batch ):
all_text = " ".join(batch["text"] )
vocab = list(set(all_text ) )
return {"vocab": [vocab], "all_text": [all_text]}
vocab_train = train_dataset.map(
extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=train_dataset.column_names , )
vocab_test = train_dataset.map(
extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=eval_dataset.column_names , )
vocab_list = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
vocab_dict = {v: k for k, v in enumerate(vocab_list )}
vocab_dict["|"] = vocab_dict[" "]
del vocab_dict[" "]
vocab_dict["[UNK]"] = len(vocab_dict )
vocab_dict["[PAD]"] = len(vocab_dict )
with open("vocab.json" , "w" ) as vocab_file:
json.dump(vocab_dict , vocab_file )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
tokenizer = Wav2Vec2CTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
feature_extractor = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=True , return_attention_mask=True )
processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
model = Wav2Vec2ForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
train_dataset = train_dataset.select(range(max_train_samples ) )
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples ) )
resampler = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(batch ):
speech_array, sampling_rate = torchaudio.load(batch["path"] )
batch["speech"] = resampler(speech_array ).squeeze().numpy()
batch["sampling_rate"] = 1_60_00
batch["target_text"] = batch["text"]
return batch
train_dataset = train_dataset.map(
speech_file_to_array_fn , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
eval_dataset = eval_dataset.map(
speech_file_to_array_fn , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(batch ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
processed_batch = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(processed_batch )
return batch
train_dataset = train_dataset.map(
prepare_dataset , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
eval_dataset = eval_dataset.map(
prepare_dataset , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
# Metric
wer_metric = datasets.load_metric("wer" )
def compute_metrics(pred ):
pred_logits = pred.predictions
pred_ids = np.argmax(pred_logits , axis=-1 )
pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
pred_str = processor.batch_decode(pred_ids )
# we do not want to group tokens when computing the metrics
label_str = processor.batch_decode(pred.label_ids , group_tokens=False )
wer = wer_metric.compute(predictions=pred_str , references=label_str )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
data_collator = DataCollatorCTCWithPadding(processor=processor , padding=True )
# Initialize our Trainer
trainer = CTCTrainer(
model=model , data_collator=data_collator , args=training_args , compute_metrics=compute_metrics , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
)
metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
trainer.log_metrics("train" , metrics )
trainer.save_metrics("train" , metrics )
trainer.save_state()
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset )
metrics["eval_samples"] = min(max_val_samples , len(eval_dataset ) )
trainer.log_metrics("eval" , metrics )
trainer.save_metrics("eval" , metrics )
return results
if __name__ == "__main__":
main()
| 199
| 0
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def A_ ( self ):
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Any = None
if self.use_input_mask:
_lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
_lowerCamelCase : str = None
_lowerCamelCase : List[str] = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Dict = EsmModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowerCamelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
_lowerCamelCase : Dict = model(UpperCamelCase_ )
_lowerCamelCase : Union[str, Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = EsmForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowerCamelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Tuple = self.num_labels
_lowerCamelCase : int = EsmForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowerCamelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
(
_lowerCamelCase
) : List[str] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
test_mismatched_shapes = False
all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
all_generative_model_classes = ()
pipeline_model_mapping = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
test_sequence_classification_problem_types = True
def A_ ( self ):
self.model_tester = EsmModelTester(self )
self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : int = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def A_ ( self ):
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def A_ ( self ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
@slow
def A_ ( self ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int = EsmModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def A_ ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()[0]
_lowerCamelCase : Tuple = EsmEmbeddings(config=UpperCamelCase_ )
_lowerCamelCase : Optional[Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_lowerCamelCase : Optional[int] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_lowerCamelCase : Any = create_position_ids_from_input_ids(UpperCamelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCamelCase_ , UpperCamelCase_ ) ) )
def A_ ( self ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
_lowerCamelCase : Tuple = EsmEmbeddings(config=UpperCamelCase_ )
_lowerCamelCase : Any = torch.empty(2 , 4 , 30 )
_lowerCamelCase : Union[str, Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_lowerCamelCase : Union[str, Any] = torch.as_tensor([expected_single_positions, expected_single_positions] )
_lowerCamelCase : Tuple = embeddings.create_position_ids_from_inputs_embeds(UpperCamelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCamelCase_ , UpperCamelCase_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def A_ ( self ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def A_ ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A_ ( self ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus ):
@slow
def A_ ( self ):
with torch.no_grad():
_lowerCamelCase : int = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
_lowerCamelCase : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_lowerCamelCase : Any = model(UpperCamelCase_ )[0]
_lowerCamelCase : Any = 33
_lowerCamelCase : str = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase_ )
_lowerCamelCase : int = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def A_ ( self ):
with torch.no_grad():
_lowerCamelCase : Optional[Any] = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
_lowerCamelCase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_lowerCamelCase : int = model(UpperCamelCase_ )[0]
# compare the actual values for a slice.
_lowerCamelCase : Optional[Any] = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 96
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : List[Any] = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCAmelCase : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase : List[str] = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase : Dict = prepare_pegasus_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict ):
lowerCAmelCase : Any = 2_0
lowerCAmelCase : Any = model_class_name(UpperCamelCase_ )
lowerCAmelCase : List[str] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : Optional[Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : Any = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : int = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : List[Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict ):
lowerCAmelCase : Dict = 2_0
lowerCAmelCase : Union[str, Any] = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Any = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : List[Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ )
lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
if attention_mask is None:
attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
if decoder_attention_mask is None:
decoder_attention_mask = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin , unittest.TestCase ):
all_model_classes = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
is_encoder_decoder = True
test_pruning = False
test_head_masking = False
test_onnx = False
def lowerCamelCase__ ( self : List[str] ):
self.model_tester = FlaxPegasusModelTester(self )
self.config_tester = ConfigTester(self , config_class=PegasusConfig )
def lowerCamelCase__ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase, lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : str = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Tuple ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : Tuple = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Dict = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Optional[int] = model_class(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase : Any = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : Optional[Any] = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Any = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase__ ( self : str ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : int = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : List[Any] = np.ones((1, 1) )
lowerCAmelCase : str = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase : List[Any] = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase : int = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
lowerCAmelCase : str = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
lowerCAmelCase : Optional[Any] = tokenizer(UpperCamelCase_ , return_tensors='''np''' , truncation=UpperCamelCase_ , max_length=5_1_2 , padding=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model.generate(**UpperCamelCase_ , num_beams=2 ).sequences
lowerCAmelCase : Tuple = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
assert tgt_text == decoded
| 60
| 0
|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    """Multi Level Feedback Queue scheduler: round robin on every queue but the last, FCFS on the last."""

    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
__lowerCAmelCase = Process('''P1''', 0, 53)
__lowerCAmelCase = Process('''P2''', 0, 17)
__lowerCAmelCase = Process('''P3''', 0, 68)
__lowerCAmelCase = Process('''P4''', 0, 24)
__lowerCAmelCase = 3
__lowerCAmelCase = [17, 25]
__lowerCAmelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
__lowerCAmelCase = Process('''P1''', 0, 53)
__lowerCAmelCase = Process('''P2''', 0, 17)
__lowerCAmelCase = Process('''P3''', 0, 68)
__lowerCAmelCase = Process('''P4''', 0, 24)
__lowerCAmelCase = 3
__lowerCAmelCase = [17, 25]
__lowerCAmelCase = deque([Pa, Pa, Pa, Pa])
__lowerCAmelCase = MLFQ(number_of_queues, time_slices, queue, 0)
__lowerCAmelCase = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
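# For intuition, the time-slice step above can be exercised as a standalone helper;
# this is an illustrative sketch, not part of the original module.
def round_robin_once(burst_times: list[int], time_slice: int) -> list[int]:
    # run every process for at most one time slice and return the
    # remaining burst time of each process, in arrival order
    pending = deque(enumerate(burst_times))
    remaining = list(burst_times)
    for _ in range(len(pending)):
        index, burst = pending.popleft()
        used = min(burst, time_slice)
        remaining[index] = burst - used
        if remaining[index] > 0:
            pending.append((index, remaining[index]))
    return remaining
# matches the first MLFQ pass over P1..P4 with a time slice of 17
assert round_robin_once([53, 17, 68, 24], 17) == [36, 0, 51, 7]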
| 288
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 1024}
class __a ( PreTrainedTokenizer ):
__lowercase : int = VOCAB_FILES_NAMES
__lowercase : str = PRETRAINED_VOCAB_FILES_MAP
__lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> None:
'''simple docstring'''
# Mask token behaves like a normal word, i.e. it includes the space before it
lowercase__: List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
lowercase__: Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
lowercase__: Dict = vocab_file
lowercase__: str = monolingual_vocab_file
lowercase__: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__: List[Any] = {}
lowercase__: Optional[int] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCAmelCase__ ) not in self.fairseq_tokens_to_ids:
lowercase__: str = cnt
cnt += 1
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
lowercase__: Optional[Any] = line.strip().split()[0]
lowercase__: Optional[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCAmelCase__ ) not in self.fairseq_tokens_to_ids:
lowercase__: Optional[int] = len(self.fairseq_tokens_to_ids )
lowercase__: Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: Tuple = self.__dict__.copy()
lowercase__: Tuple = None
lowercase__: Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase__: Union[str, Any] = {}
lowercase__: List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__: Optional[int] = [self.cls_token_id]
lowercase__: Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
lowercase__: Dict = [self.sep_token_id]
lowercase__: Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Optional[Any] = ''.join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , ' ' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__: int = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowercase__: List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , 'wb' ) as fi:
lowercase__: Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCAmelCase__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'{str(lowerCAmelCase__ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
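# Usage sketch, assuming network access to the vinai/bartpho-syllable checkpoint
# and an installed sentencepiece backend; not part of the original module.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    bartpho = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
    line = "Chúng tôi là những nghiên cứu viên."
    input_ids = bartpho(line)["input_ids"]
    print(bartpho.convert_ids_to_tokens(input_ids))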
| 288
| 1
|
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class lowercase_ (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = BertGenerationTokenizer
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = True
def SCREAMING_SNAKE_CASE ( self : List[str] ):
super().setUp()
tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB ,keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = '''<s>'''
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<unk>''' )
self.assertEqual(vocab_keys[1] ,'''<s>''' )
self.assertEqual(vocab_keys[-1] ,'''<pad>''' )
self.assertEqual(len(_UpperCAmelCase ) ,1_0_0_2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_0 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB ,keep_accents=True )
__lowercase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] ,)
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
__lowercase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase ,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] ,)
__lowercase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = '''Hello World!'''
__lowercase = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(_UpperCAmelCase ,self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
__lowercase = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(_UpperCAmelCase ,self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__lowercase = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
__lowercase = ''' '''.join(_UpperCAmelCase )
__lowercase = self.big_tokenizer.encode_plus(_UpperCAmelCase ,return_tensors='''pt''' ,return_token_type_ids=_UpperCAmelCase )
__lowercase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] ,return_tensors='''pt''' ,return_token_type_ids=_UpperCAmelCase )
__lowercase = BertGenerationConfig()
__lowercase = BertGenerationEncoder(_UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCAmelCase )
model(**_UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = {'''input_ids''': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' ,revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' ,)
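# A minimal round trip with the class under test, assuming the local SentencePiece
# fixture referenced above is present; it mirrors the tokenize/convert assertions.
if __name__ == "__main__":
    demo_tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB)
    demo_tokens = demo_tokenizer.tokenize("This is a test")
    demo_ids = demo_tokenizer.convert_tokens_to_ids(demo_tokens)
    print(demo_tokens, demo_ids, demo_tokenizer.convert_ids_to_tokens(demo_ids))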
| 104
|
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    '''simple docstring'''
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    '''simple docstring'''
    end = len(s) // 2
    n = len(s)
    # We only need to traverse up to half the length of the string,
    # since the i'th element from the end is reachable from index i:
    # e.g. in [0,1,2,3,4,5] the element at index 4 pairs with the one
    # at index 1 (i == n - i - 1), where n is the length of the string.
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    '''simple docstring'''
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    '''simple docstring'''
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    '''simple docstring'''
    stmt = F'''all({name}(key) is value for key, value in test_data.items())'''
    setup = F'''from __main__ import test_data, {name}'''
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
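# In practice the slice check is usually paired with a normalization pass so that
# phrases like the one printed above also count; a small illustrative sketch:
def is_palindrome_phrase(text: str) -> bool:
    # ignore case and every non-alphanumeric character
    cleaned = [ch.lower() for ch in text if ch.isalnum()]
    return cleaned == cleaned[::-1]
assert is_palindrome_phrase("A man, a plan, a canal: Panama")
assert not is_palindrome_phrase("String")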
| 346
| 0
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout() -> None:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error() -> None:
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')
def test_offline_with_datasets_offline_mode_enabled() -> None:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
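# The same simulation modes can wrap arbitrary code under test; an illustrative,
# hypothetical example (the test name and target URL are placeholders):
def test_my_loader_fails_fast():
    # CONNECTION_FAILS makes every outgoing request raise immediately,
    # which exercises a loader's offline error path without waiting
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co/api/datasets')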
| 352
|
import enum
import shutil
import sys
TERMINAL_WIDTH , _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class Direction(enum.Enum):
    """simple docstring"""
    UP = 0
    DOWN = 1
def forceWrite(content, end="") -> None:
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()
def writeColor(content, color, end="") -> None:
    forceWrite(F'\u001b[{color}m{content}\u001b[0m', end)
def reset_cursor() -> None:
    forceWrite('\r')
def move_cursor(num_lines: int, direction: str) -> None:
    forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}')
def clear_line() -> None:
    forceWrite(' ' * TERMINAL_WIDTH)
    reset_cursor()
def linebreak() -> None:
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH)
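# The helpers above reduce to standard ANSI escape sequences; a self-contained
# sketch that rewrites the previous terminal line without using them:
if __name__ == "__main__":
    import time

    sys.stdout.write("downloading...\n")
    sys.stdout.flush()
    time.sleep(0.5)
    # "\033[1A" moves the cursor up one row, "\033[2K" clears that whole line
    sys.stdout.write("\033[1A\033[2K")
    sys.stdout.write("download complete\n")
    sys.stdout.flush()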
| 225
| 0
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _SCREAMING_SNAKE_CASE ( AbstractFileSystem ):
'''simple docstring'''
lowercase_ = ""
lowercase_ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__(self : List[str] , UpperCAmelCase_ : Optional[DatasetInfo] = None , UpperCAmelCase_ : Optional[str] = None , **UpperCAmelCase_ : Optional[Any] , ) ->int:
'''simple docstring'''
super().__init__(self , **UpperCAmelCase_)
lowerCamelCase__: List[str] =repo_info
lowerCamelCase__: Optional[Any] =token
lowerCamelCase__: Optional[Any] =None
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
if self.dir_cache is None:
lowerCamelCase__: Any ={}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowerCamelCase__: int ={
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(UpperCAmelCase_): {"name": str(UpperCAmelCase_), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
})
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str = "rb" , **UpperCAmelCase_ : int , ) ->Optional[int]:
'''simple docstring'''
if not isinstance(self.repo_info , UpperCAmelCase_):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""")
lowerCamelCase__: Any =hf_hub_url(self.repo_info.id , UpperCAmelCase_ , revision=self.repo_info.sha)
return fsspec.open(
UpperCAmelCase_ , mode=UpperCAmelCase_ , headers=get_authentication_headers_for_url(UpperCAmelCase_ , use_auth_token=self.token) , client_kwargs={"trust_env": True} , ).open()
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int]) ->str:
'''simple docstring'''
self._get_dirs()
lowerCamelCase__: List[str] =self._strip_protocol(UpperCAmelCase_)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[int]) ->str:
'''simple docstring'''
self._get_dirs()
lowerCamelCase__: List[str] =PurePosixPath(path.strip("/"))
lowerCamelCase__: Union[str, Any] ={}
for p, f in self.dir_cache.items():
lowerCamelCase__: List[str] =PurePosixPath(p.strip("/"))
lowerCamelCase__: List[Any] =p.parent
if root == path:
lowerCamelCase__: Tuple =f
lowerCamelCase__: int =list(paths.values())
if detail:
return out
else:
return sorted(f["name"] for f in out)
| 10
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : List[str]=400 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=0.9 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =size if size is not None else {"shortest_edge": 30}
lowerCamelCase__: Dict =crop_size if crop_size is not None else {"height": 30, "width": 30}
lowerCamelCase__: Any =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: Optional[Any] =num_channels
lowerCamelCase__: Tuple =min_resolution
lowerCamelCase__: Union[str, Any] =max_resolution
lowerCamelCase__: Union[str, Any] =do_resize_and_center_crop
lowerCamelCase__: Optional[int] =size
lowerCamelCase__: str =crop_pct
lowerCamelCase__: Any =crop_size
lowerCamelCase__: List[str] =do_normalize
lowerCamelCase__: List[str] =image_mean
lowerCamelCase__: Tuple =image_std
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
lowercase_ = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =PoolFormerImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop"))
self.assertTrue(hasattr(UpperCAmelCase_ , "size"))
self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std"))
def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 30})
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30})
lowerCamelCase__: Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCamelCase__: Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image)
# Test not batched input
lowerCamelCase__: Dict =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: int =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCamelCase__: Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
# Test not batched input
lowerCamelCase__: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: List[str] =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCamelCase__: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
# Test not batched input
lowerCamelCase__: Any =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: str =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
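# The size/crop_pct/crop_size interplay tested above amounts to scaling the short
# side to size / crop_pct before center-cropping; a rough sketch of that
# arithmetic, assuming shortest-edge resizing:
def scaled_size_before_crop(height, width, shortest_edge, crop_pct):
    scale_size = int(shortest_edge / crop_pct)
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(long * scale_size / short)
    return (scale_size, new_long) if height <= width else (new_long, scale_size)
# a 60x80 image prepared for a 30-pixel crop at crop_pct=0.9 is first resized to 33x44
assert scaled_size_before_crop(60, 80, 30, 0.9) == (33, 44)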
| 10
| 1
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
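# An alpha-beta variant prunes subtrees that cannot change the result; this is an
# illustrative extension of the minimax above, not part of the original module.
def alphabeta(depth, node_index, is_max, scores, height, alpha=float("-inf"), beta=float("inf")):
    if depth == height:
        return scores[node_index]
    if is_max:
        value = float("-inf")
        for child in (node_index * 2, node_index * 2 + 1):
            value = max(value, alphabeta(depth + 1, child, False, scores, height, alpha, beta))
            alpha = max(alpha, value)
            if beta <= alpha:
                break  # prune: the minimizer already has a better option elsewhere
        return value
    value = float("inf")
    for child in (node_index * 2, node_index * 2 + 1):
        value = min(value, alphabeta(depth + 1, child, True, scores, height, alpha, beta))
        beta = min(beta, value)
        if beta <= alpha:
            break  # prune: the maximizer already has a better option elsewhere
    return value
assert alphabeta(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65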
| 363
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowerCamelCase : Any = {f"""funnel-transformer/{name}""": 5_12 for name in _model_names}
_lowerCamelCase : Optional[Any] = {f"""funnel-transformer/{name}""": {'''do_lower_case''': True} for name in _model_names}
class lowercase ( PreTrainedTokenizerFast ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
lowercase__ : Union[str, Any] = FunnelTokenizer
lowercase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : int = 2
def __init__( self : str , _UpperCamelCase : str=None , _UpperCamelCase : str=None , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : str="<unk>" , _UpperCamelCase : Optional[Any]="<sep>" , _UpperCamelCase : Optional[int]="<pad>" , _UpperCamelCase : int="<cls>" , _UpperCamelCase : Dict="<mask>" , _UpperCamelCase : Union[str, Any]="<s>" , _UpperCamelCase : Optional[int]="</s>" , _UpperCamelCase : Dict=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Any=None , _UpperCamelCase : Dict="##" , **_UpperCamelCase : Dict , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , clean_text=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , wordpieces_prefix=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _UpperCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , _UpperCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _UpperCamelCase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , normalizer_state.pop("type" ) )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = strip_accents
SCREAMING_SNAKE_CASE = tokenize_chinese_chars
SCREAMING_SNAKE_CASE = normalizer_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = do_lower_case
def __snake_case( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict=None ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
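# Funnel's one deviation from BERT-style segment ids is that [CLS] carries its own
# token type (the 2 stored on the class above); a worked sketch of the layout:
def funnel_token_type_ids(len_a, len_b=None, cls_token_type_id=2):
    # [CLS] -> cls_token_type_id, sentence A plus its [SEP] -> 0,
    # sentence B plus its [SEP] -> 1
    ids = [cls_token_type_id] + [0] * (len_a + 1)
    if len_b is not None:
        ids += [1] * (len_b + 1)
    return ids
assert funnel_token_type_ids(3) == [2, 0, 0, 0, 0]
assert funnel_token_type_ids(2, 2) == [2, 0, 0, 0, 1, 1, 1]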
| 206
| 0
|
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__A = logging.get_logger(__name__)
class _snake_case :
def __init__( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Dict ):
__lowerCamelCase : int = question_encoder
__lowerCamelCase : Optional[int] = generator
__lowerCamelCase : int = self.question_encoder
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Optional[Any] ):
if os.path.isfile(lowercase_ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
__lowerCamelCase : Dict = os.path.join(lowercase_ , "question_encoder_tokenizer" )
__lowerCamelCase : Dict = os.path.join(lowercase_ , "generator_tokenizer" )
self.question_encoder.save_pretrained(lowercase_ )
self.generator.save_pretrained(lowercase_ )
@classmethod
def lowerCamelCase__ ( cls : List[str] , UpperCAmelCase : Tuple , **UpperCAmelCase : List[str] ):
from ..auto.tokenization_auto import AutoTokenizer
__lowerCamelCase : Any = kwargs.pop("config" , lowercase_ )
if config is None:
__lowerCamelCase : Optional[Any] = RagConfig.from_pretrained(lowercase_ )
__lowerCamelCase : Dict = AutoTokenizer.from_pretrained(
lowercase_ , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
__lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
lowercase_ , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=lowercase_ , generator=lowercase_ )
def __call__( self : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
return self.current_tokenizer(*lowercase_ , **lowercase_ )
def lowerCamelCase__ ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
return self.generator.batch_decode(*lowercase_ , **lowercase_ )
def lowerCamelCase__ ( self : Any , *UpperCAmelCase : str , **UpperCAmelCase : Any ):
return self.generator.decode(*lowercase_ , **lowercase_ )
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Optional[Any] = self.question_encoder
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : List[Any] = self.generator
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str = "longest" , UpperCAmelCase : str = None , UpperCAmelCase : bool = True , **UpperCAmelCase : int , ):
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , lowercase_ , )
if max_length is None:
__lowerCamelCase : List[Any] = self.current_tokenizer.model_max_length
__lowerCamelCase : Dict = self(
lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , max_length=lowercase_ , padding=lowercase_ , truncation=lowercase_ , **lowercase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__lowerCamelCase : Optional[int] = self.current_tokenizer.model_max_length
__lowerCamelCase : int = self(
text_target=lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , **lowercase_ , )
__lowerCamelCase : Optional[Any] = labels['input_ids']
return model_inputs
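# End to end, the two sub-tokenizers are normally restored from a pretrained RAG
# checkpoint; a usage sketch, assuming the facebook/rag-token-nq weights can be
# downloaded:
if __name__ == "__main__":
    from transformers import RagTokenizer

    rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    batch = rag_tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
    print(batch["input_ids"].shape)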
| 135
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class A ( unittest.TestCase ):
def lowerCamelCase ( self : Dict , lowercase_ : Path , lowercase_ : Union[str, None] = None , lowercase_ : Union[List[str], None] = None , lowercase_ : Union[str, List[str], None] = None , lowercase_ : bool = True , ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =[file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )]
if identifier is not None:
_lowerCamelCase : Any =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_ ):
for n_ in n_identifier:
_lowerCamelCase : Any =[file for file in files if n_ not in file]
else:
_lowerCamelCase : List[Any] =[file for file in files if n_identifier not in file]
_lowerCamelCase : List[Any] =ignore_files or []
ignore_files.append('__init__.py' )
_lowerCamelCase : List[Any] =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_ )
if only_modules:
_lowerCamelCase : Optional[int] =file.split('.' )[0]
try:
_lowerCamelCase : Any =getattr(lowercase_ , lowercase_ )
_lowerCamelCase : Optional[int] =doctest.DocTestSuite(lowercase_ )
_lowerCamelCase : List[Any] =unittest.TextTestRunner().run(lowercase_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'''{module_identifier} is not a module.''' )
else:
_lowerCamelCase : int =doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowerCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : int =Path('src/transformers' )
_lowerCamelCase : Optional[Any] ='modeling'
_lowerCamelCase : List[str] =[
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ )
def lowerCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] =Path('src/transformers' )
_lowerCamelCase : Union[str, Any] ='tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def lowerCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_lowerCamelCase : str =Path('src/transformers' )
_lowerCamelCase : Any ='configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def lowerCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =Path('src/transformers' )
_lowerCamelCase : int =['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_ )
def lowerCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_lowerCamelCase : str =Path('docs/source' )
_lowerCamelCase : Dict =['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
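# The suite-based branch above builds on doctest's programmatic API; the simpler
# entry point is doctest.testmod, shown here on a self-contained example:
if __name__ == "__main__":
    def add(a, b):
        """
        >>> add(2, 3)
        5
        """
        return a + b

    assert doctest.testmod(verbose=False).failed == 0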
| 199
| 0
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        """token_embedder""": """embeddings""",
        """encoder_norm""": """layernorm""",
        """kernel""": """weight""",
        """.out""": """.output""",
        """scale""": """weight""",
        """embedders_0.pos_embedding""": """row_embedder.weight""",
        """embedders_1.pos_embedding""": """column_embedder.weight""",
    }
    DECODER_CONVERSION_MAPPING = {
        """query""": """attention.query""",
        """key""": """attention.key""",
        """value""": """attention.value""",
        """output.dense""": """output""",
        """encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
        """pre_self_attention_layer_norm""": """self_attention.layer_norm""",
        """pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
        """mlp.""": """mlp.DenseReluDense.""",
        """pre_mlp_layer_norm""": """mlp.layer_norm""",
        """self_attention.o""": """self_attention.attention.o""",
        """decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
        """decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
        """decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
        """decoder.logits_dense.weight""": """decoder.lm_head.weight""",
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = """.""".join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"""layers_(\d+)""", r"""layer.\1""", new_key)
                new_key = new_key.replace("""encoder""", """encoder.encoder""")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"""layers_(\d+)""", r"""layer.\1""", new_key)
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)
    model = Pix2StructForConditionalGeneration(config)
    converted_torch_dict = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(converted_torch_dict)
    tokenizer = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        processor.image_processor.max_patches = 4096
    processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("""Model saved in {}""".format(pytorch_dump_folder_path))
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
_snake_case = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
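# The layer renaming hinges on a single regex; checking it in isolation helps when
# a checkpoint's layout changes. The keys below are illustrative, not from a real
# checkpoint:
if __name__ == "__main__":
    sample_keys = ["encoder.layers_0.attention.query.weight", "decoder.layers_11.mlp.wi.weight"]
    renamed = [re.sub(r"layers_(\d+)", r"layer.\1", k) for k in sample_keys]
    assert renamed == ["encoder.layer.0.attention.query.weight", "decoder.layer.11.mlp.wi.weight"]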
| 343
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( ProcessorMixin ):
_a = ["image_processor", "tokenizer"]
_a = "BlipImageProcessor"
_a = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a , _a ) -> Any:
_A : List[Any] = False
super().__init__(_a , _a )
_A : Optional[int] = self.image_processor
def __call__( self , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_A : Dict = self.tokenizer
_A : Dict = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
return text_encoding
# add pixel_values
_A : int = self.image_processor(_a , return_tensors=_a )
if text is not None:
_A : List[Any] = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
else:
_A : int = None
if text_encoding is not None:
encoding_image_processor.update(_a )
return encoding_image_processor
def a__ ( self , *_a , **_a ) -> Any:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> List[str]:
return self.tokenizer.decode(*_a , **_a )
@property
def a__ ( self ) -> Optional[Any]:
_A : Any = self.tokenizer.model_input_names
_A : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
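# A typical captioning call routes an image plus an optional prompt through this
# processor; a usage sketch, assuming the Salesforce/blip-image-captioning-base
# checkpoint can be downloaded:
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import BlipForConditionalGeneration, BlipProcessor

    blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
    demo_image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    blip_inputs = blip_processor(images=demo_image, text="a photo of", return_tensors="pt")
    generated = blip_model.generate(**blip_inputs)
    print(blip_processor.decode(generated[0], skip_special_tokens=True))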
| 343
| 1
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
__a = 10000
__a = None
__a = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
__a = ParquetConfig
def lowercase ( self : int ):
return datasets.DatasetInfo(features=self.config.features )
def lowercase ( self : Any , _lowerCamelCase : Union[str, Any] ):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_snake_case = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
_snake_case = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_snake_case = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_snake_case = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_snake_case = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_snake_case = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_snake_case = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_lowerCamelCase ):
with open(_lowerCamelCase , '''rb''' ) as f:
_snake_case = datasets.Features.from_arrow_schema(pq.read_schema(_lowerCamelCase ) )
break
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
def lowercase ( self : Union[str, Any] , _lowerCamelCase : pa.Table ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_snake_case = table_cast(_lowerCamelCase , self.info.features.arrow_schema )
return pa_table
    def _generate_tables ( self : Dict , files : Tuple ):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                    raise
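# Minimal standalone sketch (added for illustration, not part of the original builder) of the
# batched read pattern used in _generate_tables above, assuming a local "data.parquet" exists:
#
#     with open("data.parquet", "rb") as f:
#         for record_batch in pq.ParquetFile(f).iter_batches(batch_size=10000):
#             table = pa.Table.from_batches([record_batch])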
| 288
|
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCAmelCase__ = logging.getLogger(__name__)
class lowerCAmelCase__ ( PretrainedConfig ):
    model_type = """masked_bert"""
    def __init__( self : Union[str, Any] , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 288
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self : Dict ,image_processor : List[str] ,tokenizer : Optional[Any] ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor ,tokenizer )
        self.current_processor = self.image_processor
    def __call__( self : Any ,images : ImageInput = None ,text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,add_special_tokens : bool = True ,padding : Union[bool, str, PaddingStrategy] = False ,truncation : Union[bool, str, TruncationStrategy] = None ,max_length : Optional[int] = None ,stride : int = 0 ,pad_to_multiple_of : Optional[int] = None ,return_attention_mask : Optional[bool] = None ,return_overflowing_tokens : bool = False ,return_special_tokens_mask : bool = False ,return_offsets_mapping : bool = False ,return_token_type_ids : bool = False ,return_length : bool = False ,verbose : bool = True ,return_tensors : Optional[Union[str, TensorType]] = None ,**kwargs : Optional[Any] ,):
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_token_type_ids=return_token_type_ids ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images ,return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_token_type_ids=return_token_type_ids ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self : Tuple ,*args : Optional[int] ,**kwargs : Union[str, Any] ):
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode( self : Tuple ,*args : int ,**kwargs : Dict ):
        return self.tokenizer.decode(*args ,**kwargs )
    @property
    def model_input_names( self : Any ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
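# Usage sketch (added for illustration; `image` is an assumed PIL.Image instance, not defined here):
#
#     processor = UpperCAmelCase(BlipImageProcessor(), BertTokenizer.from_pretrained("bert-base-uncased"))
#     encoding = processor(images=image, text="a photo of a cat", return_tensors="pt")
#     # `encoding` holds pixel_values plus the tokenized text fields.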
| 353
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__)
class UpperCAmelCase ( CLIPImageProcessor ):
'''simple docstring'''
    def __init__( self : int ,*args : Dict ,**kwargs : Dict ):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 124
| 0
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _SCREAMING_SNAKE_CASE ( ProcessorMixin ):
    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self , feature_extractor , tokenizer ) -> str:
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ) -> Dict:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , *args , **kwargs ) -> List[Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def pad( self , *args , **kwargs ) -> List[str]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("input_features" , None )
        labels = kwargs.pop("labels" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def decode( self , *args , **kwargs ) -> int:
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ) -> str:
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
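# Usage sketch (added for illustration; `waveform` is an assumed 1-D audio array, not defined here):
#
#     inputs = processor(audio=waveform, sampling_rate=16000, text="hello world")
#     # `inputs` then contains the extracted features plus `labels` with the token ids.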
| 19
|
class Graph :
    '''simple docstring'''
    def __init__( self : List[Any] ):
        self.vertex = {}
    def print_graph( self : List[str] ):
        print(self.vertex )
        for i in self.vertex:
            print(i , ' -> ' , ' -> '.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self : List[Any] , from_vertex : int , to_vertex : int ):
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self : Dict ):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self : Union[str, Any] , start_vertex : int , visited : list ):
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=' ' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
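# Illustrative aside (added): the same traversal with an explicit stack instead of recursion,
# starting from vertex 0 and assuming the adjacency-dict layout used by Graph above.
def dfs_iterative(graph: dict) -> list:
    visited, order, stack = set(), [], [0]
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        # push neighbours in reverse so they are popped in insertion order
        stack.extend(reversed(graph.get(vertex, [])))
    return order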
| 225
| 0
|
import os
from collections.abc import Iterator
def good_file_paths ( top_dir : str = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('''./''' )
def md_prefix ( i : int ) -> str:
    return f'''{i * " "}*''' if i else "\n##"
def print_path ( old_path : str , new_path : str ) -> str:
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i )} {new_part.replace("_" , " " ).title()}''' )
    return new_path
def print_directory_md ( top_dir : str = "." ) -> None:
    old_path = ''''''
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = f'''{filepath}/{filename}'''.replace(''' ''' , '''%20''' )
        filename = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
        print(f'''{md_prefix(indent )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".")
| 367
|
"""simple docstring"""
import math
import unittest
def is_prime ( number : int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
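# Illustrative aside (added): every prime greater than 3 is of the form 6k +/- 1, since
# 6k, 6k + 2 and 6k + 4 are even and 6k + 3 is divisible by 3. That is why the
# trial-division loop above steps by 6 and only tests i and i + 2.
assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 17, 19, 23, 29))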
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
    def test_primes ( self ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes ( self ) -> Tuple:
"""simple docstring"""
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) ,'''Zero doesn\'t have any positive factors, primes must have exactly two.''' ,)
self.assertFalse(
is_prime(1 ) ,'''One only has 1 positive factor, primes must have exactly two.''' ,)
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 239
| 0
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main ( ) -> Any:
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' , type=str , default='data/dump.txt' , help='The path to the data.' )
    parser.add_argument('--tokenizer_type' , type=str , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' , type=str , default='bert-base-uncased' , help='The tokenizer to use.' )
    parser.add_argument('--dump_file' , type=str , default='data/dump' , help='The dump file prefix.' )
    args = parser.parse_args()
    logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["""cls_token"""]  # `[CLS]`
        sep = tokenizer.special_tokens_map["""sep_token"""]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["""cls_token"""]  # `<s>`
        sep = tokenizer.special_tokens_map["""sep_token"""]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["""bos_token"""]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["""eos_token"""]  # `<|endoftext|>`
    logger.info(f"Loading text from {args.file_path}" )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        data = fp.readlines()
    logger.info('Start encoding' )
    logger.info(f"{len(data )} examples to process." )
    rslt = []
    iter = 0
    interval = 1_0000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
            start = time.time()
    logger.info('Finished binarization' )
    logger.info(f"{len(data )} examples processed." )
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f"Dump to {dp_file}" )
    with open(dp_file , 'wb' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
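    # Note (added): token ids fit in uint16 whenever vocab_size < 65536 (1 << 16), which
    # halves the size of the pickled dump compared to int32 for BERT-sized vocabularies.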
if __name__ == "__main__":
main()
| 224
|
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa :
    def __init__(self , data ):
        self.data = data
# Initialize hash values
        self.hashes = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing (data ):
        padding = b"""\x80""" + (b"""\x00""" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash (self ):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(""">16L""" , block ) )
            # add 48 zeroed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zeroed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0X1_0000_0000
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0XFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X1_0000_0000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0X1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0X1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X1_0000_0000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = """""".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror (self , value , rotations ):
        return 0XFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
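# Worked example (added): for the 32-bit right-rotation above, ror(0x00000001, 1) yields
# 0x80000000 -- the low bit wraps around to the top, which the compression loop relies on.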
class _lowerCAmelCase ( unittest.TestCase ):
    def test_match_hashes (self ):
        import hashlib
        data = bytes("""Test String""" , """utf-8""" )
        self.assertEqual(SHAaaa(data ).hash , hashlib.sha256(data ).hexdigest() )
def main ( ):
    '''simple docstring'''
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument(
        """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , """utf-8""" )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
| 206
| 0
|
'''simple docstring'''
def solution ( max_base : int = 10 , max_power : int = 22):
    bases = range(1 , max_base)
    powers = range(1 , max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power)
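# Worked example (added): 8 ** 3 == 512 has exactly 3 digits, so (base=8, power=3) is one
# of the n-digit n-th powers tallied by solution(10, 22).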
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
| 251
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig):
    model_type = """openai-gpt"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , )-> List[str]:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 251
| 1
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout ( )-> str:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error ( )-> List[str]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled ( )-> Tuple:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('https://huggingface.co' )
| 39
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field ( default=None , metadata=None ) -> Tuple:
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BasicExample :
    '''simple docstring'''
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample :
    '''simple docstring'''
    foo: int = 42
    baz: str = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class WithDefaultBoolExample :
    '''simple docstring'''
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum (Enum):
    '''simple docstring'''
    titi = """titi"""
    toto = """toto"""
class MixedTypeEnum (Enum):
    '''simple docstring'''
    titi = """titi"""
    toto = """toto"""
    fourtytwo = 42
@dataclass
class EnumExample :
    '''simple docstring'''
    foo: BasicEnum = "toto"
    def __post_init__ ( self : int ) ->List[Any]:
        """simple docstring"""
        self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample :
    '''simple docstring'''
    foo: MixedTypeEnum = "toto"
    def __post_init__ ( self : Optional[int] ) ->Optional[Any]:
        """simple docstring"""
        self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample :
    '''simple docstring'''
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={"""help""": """help message"""})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample :
    '''simple docstring'''
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample :
    '''simple docstring'''
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__ ( self : Any ) ->str:
        """simple docstring"""
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample :
    '''simple docstring'''
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="""toto""" , metadata={"""help""": """help message"""})
    foo_str: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePt310 :
        '''simple docstring'''
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePt310 :
        '''simple docstring'''
        foo: int | None = None
        bar: float | None = field(default=None , metadata={"""help""": """help message"""})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class __A (unittest.TestCase):
'''simple docstring'''
    def argparsersEqual ( self : Optional[int] , a : argparse.ArgumentParser , b : argparse.ArgumentParser ) ->Optional[int]:
        """simple docstring"""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != """container"""}
            yy = {k: v for k, v in vars(y ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) )
del xx["type"], yy["type"]
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ )
self.assertFalse(example.flag )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" )
expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase_ )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
@dataclass
class __A :
'''simple docstring'''
__lowercase: Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(
UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
expected.add_argument("""--bar""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" )
expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase_ )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) )
snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" )
os.mkdir(UpperCAmelCase_ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" )
os.mkdir(UpperCAmelCase_ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
| 347
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean ( input_a : np.ndarray, input_b : np.ndarray ) -> float:
    """simple docstring"""
    return math.sqrt(sum(pow(a - b, 2 ) for a, b in zip(input_a, input_b ) ) )
def similarity_search ( dataset : np.ndarray, value_array : np.ndarray ) -> list[list[list[float] | float]]:
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg = (
            """Wrong input data's dimensions... """
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                """Wrong input data's shape... """
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("""Wrong shape""" )
    if dataset.dtype != value_array.dtype:
        msg = (
            """Input data have different datatype... """
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity ( input_a : np.ndarray, input_b : np.ndarray ) -> float:
    """simple docstring"""
    return np.dot(input_a, input_b ) / (norm(input_a ) * norm(input_b ))
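# Illustrative example (added): for dataset np.array([[0], [1], [2]]) and
# value_array np.array([[0]]), similarity_search returns [[[0], 0.0]] --
# the nearest stored vector together with its euclidean distance.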
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136
|
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius ( number : int ) -> int:
    """simple docstring"""
    factors = prime_factors(number )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
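# Worked example (added): 6 = 2 * 3 is square-free with an even number of prime factors,
# so the function returns 1; 12 = 2^2 * 3 is not square-free, so it returns 0.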
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester :
    def __init__( self : Dict , parent : Optional[Any] , batch_size : List[Any]=13 , seq_length : Union[str, Any]=7 , is_training : Any=True , use_input_mask : Optional[Any]=True , use_token_type_ids : Dict=True , use_labels : str=True , vocab_size : List[str]=99 , block_sizes : str=[1, 1, 2] , num_decoder_layers : int=1 , d_model : List[str]=32 , n_head : str=4 , d_head : Union[str, Any]=8 , d_inner : Optional[int]=37 , hidden_act : Any="gelu_new" , hidden_dropout : Tuple=0.1 , attention_dropout : Optional[Any]=0.1 , activation_dropout : Tuple=0.0 , max_position_embeddings : Union[str, Any]=5_12 , type_vocab_size : Dict=3 , initializer_std : str=0.02 , num_labels : Union[str, Any]=3 , num_choices : str=4 , scope : Dict=None , base : List[str]=False , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs( self : str ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model( self : Optional[int] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        model = TFFunnelModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.separate_cls = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model( self : Optional[Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        model = TFFunnelBaseModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def create_and_check_for_pretraining( self : List[str] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        model = TFFunnelForPreTraining(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_masked_lm( self : List[str] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        model = TFFunnelForMaskedLM(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self : Optional[Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self : List[Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self : int , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self : Tuple , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        model = TFFunnelForQuestionAnswering(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self : List[Any] ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class a__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes =(
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping =(
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking =False
    test_onnx =False
    def setUp( self : List[str] ):
        """simple docstring"""
        self.model_tester = TFFunnelModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self : Dict ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self : Dict ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self : Dict ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self : List[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self : int ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self : Optional[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@require_tf
class a__ ( TFModelTesterMixin , unittest.TestCase ):
    all_model_classes =(
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
    test_head_masking =False
    test_onnx =False
    def setUp( self : Dict ):
        """simple docstring"""
        self.model_tester = TFFunnelModelTester(self , base=True )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self : Dict ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_base_model( self : List[str] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs )
    def test_for_sequence_classification( self : Optional[int] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self : str ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
| 67
|
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
"""simple docstring"""
    def _info ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute ( self , predictions=None , references=None , concatenate_texts=False ) -> List[Any]:
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
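# Worked example (added): for reference "this is the reference" vs. prediction
# "this is the prediction", jiwer counts 1 substitution over 4 reference words (1/4);
# pooled with the 3 errors over 4 words of the docstring's second pair, the corpus
# WER is (1 + 3) / (4 + 4) = 0.5, matching the example above.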
| 124
| 0
|
import math
from datetime import datetime, timedelta
def gauss_easter ( year ) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
_UpperCAmelCase = "will be" if year > datetime.now().year else "was"
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
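    # Sanity check against known Western (Gregorian) Easter dates:
    # 1994-04-03, 2000-04-23 and 2023-04-09.
    assert gauss_easter(1994) == datetime(1994, 4, 3)
    assert gauss_easter(2000) == datetime(2000, 4, 23)
    assert gauss_easter(2023) == datetime(2023, 4, 9)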
| 359
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:
    """Disable gradient computation for every parameter of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil_image(img) -> None:
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
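# A minimal usage sketch (illustrative only; assumes torch is installed and
# uses the helper names defined above):
#
#     model = torch.nn.Linear(4, 2)
#     freeze_params(model)
#     assert all(not p.requires_grad for p in model.parameters())
#     print(get_device(), get_timestamp())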
| 328
| 0
|
"""
Project Euler 40: find d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000,
where dn is the nth digit of Champernowne's constant 0.123456789101112...
"""


def solution() -> int:
    constant = []
    i = 1

    # One integer is appended per iteration, so after 10**6 appends the joined
    # string contains well over 10**6 digits.
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
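    # Quick sanity check: the known Project Euler 40 answer is
    # 1 * 1 * 5 * 3 * 7 * 2 = 210.
    assert solution() == 210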
| 55
|
'''simple docstring'''
import operator as op
def solve(post_fix: list) -> int:
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | "
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 239
| 0
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prompt):
        return self.encode_prefix(prompt)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
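# A hedged construction sketch (illustrative only; the tiny hyperparameters
# below are arbitrary and do not correspond to any released checkpoint):
#
#     decoder = UniDiffuserTextDecoder(
#         prefix_length=8, prefix_inner_dim=32, prefix_hidden_dim=16,
#         vocab_size=100, n_positions=64, n_embd=32, n_layer=2, n_head=2,
#     )
#     dummy = decoder.get_dummy_token(batch_size=2, device=torch.device("cpu"))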
| 324
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
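# This script is meant to be launched through the accelerate CLI; the exact
# behavior depends on your `accelerate config`. A typical invocation (the file
# name below is illustrative, use whatever name you saved this script under):
#
#     accelerate launch multi_process_metrics.py --mixed_precision fp16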
| 324
| 1
|
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b
    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
| 251
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
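# Thanks to the `_LazyModule` indirection above, the heavy modeling code is
# only imported when first accessed, e.g. (assumes torch is installed):
#
#     from transformers import XLMTokenizer, XLMModel  # resolved lazily
#     tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
#     model = XLMModel.from_pretrained("xlm-mlm-en-2048")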
| 251
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
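# A minimal usage sketch of the configuration above (illustrative only):
#
#     config = XmodConfig(default_language="en_XX")
#     print(config.model_type)                 # "xmod"
#     print(config.adapter_reduction_factor)   # 2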
| 369
|
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit
    number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 286
| 0
|
"""simple docstring"""
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 136
|
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 136
| 1
|
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2 ** power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
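    # Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
    assert solution(15) == 26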
| 367
|
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """Apply the ReLU function max(0, x) element-wise."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
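    # The same element-wise rule applies to floats and larger arrays:
    print(np.array(relu([-2.5, 0.5, -0.1, 3.0])))  # --> [0, 0.5, 0, 3]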
| 50
| 0
|
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 89
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Check if a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composite numbers that cannot be written as the
    sum of a prime and twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 328
| 0
|
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))

                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
| 352
|
from jiwer import compute_measures
import datasets
lowercase_ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowercase_ = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowercase_ = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'predictions': datasets.Value('string',id='sequence' ),
'references': datasets.Value('string',id='sequence' ),
} ),codebase_urls=['https://github.com/jitsi/jiwer/'],reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
],)
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 282
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO'
        )
        cls.pad_token_id = 1
        return cls
def snake_case__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 324
|
'''simple docstring'''
import math
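# Note (added): this module implements introsort - quicksort with a
# median-of-3 pivot that falls back to heapsort once the recursion depth
# exceeds 2 * ceil(log2(n)) (keeping the worst case at O(n log n)) and
# finishes slices shorter than 16 elements with insertion sort.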
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(
    array: list, first_index: int, middle_index: int, last_index: int
) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """
    :param array: list of elements to be sorted
    :return: the sorted list

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]

    >>> sort([])
    []
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(
    array: list, start: int, end: int, size_threshold: int, max_depth: int
) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 324
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 355
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_A = logging.get_logger(__name__)
_A = TypeVar('''DatasetType''', Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
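# Hedged usage sketch (added; the toy datasets are illustrative):
#
#   >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   >>> interleave_datasets([d1, d2])["a"]
#   [0, 10, 1, 11, 2, 12]
#   >>> concatenate_datasets([d1, d2])["a"]
#   [0, 1, 2, 10, 11, 12]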
| 167
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from PIL import Image\n        >>> import torch\n        >>> from diffusers import DiffusionPipeline\n        >>> from diffusers.utils import export_to_gif, load_image\n\n        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n        >>> repo = "openai/shap-e-img2img"\n        >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n        >>> pipe = pipe.to(device)\n\n        >>> guidance_scale = 3.0\n        >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n        >>> image = load_image(image_url).convert("RGB")\n\n        >>> images = pipe(\n        ...     image,\n        ...     guidance_scale=guidance_scale,\n        ...     num_inference_steps=64,\n        ...     frame_size=256,\n        ... ).images\n\n        >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n        ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """
    Output class for the pipeline below: the rendered images.
    """

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for generating the latent representation of a 3D asset conditioned
    on an input image, rendered with Shap-E's NeRF renderer.
    """
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 335
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
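# Hedged usage sketch (added): instantiate the restored config with two
# adapter languages; every other hyper-parameter keeps its default.
if __name__ == "__main__":
    config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
    print(config.model_type, config.languages)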
| 286
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 350
|
def solution(limit: int = 1_000_000) -> int:
    """
    Returns the number of reduced proper fractions with denominator <= limit
    (Project Euler problem 72) by sieving Euler's totient and summing it.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
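    # Added sanity check (sketch): phi(2)+...+phi(10) = 1+2+2+4+2+6+4+6+4 = 31,
    # the number of reduced proper fractions with denominator <= 10.
    assert solution(10) == 31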
| 344
| 0
|
def remove_duplicates(key: str) -> str:
    """
    Removes duplicate alphabetic characters in a keyword (keeps " " as well).

    >>> remove_duplicates('Hello World!!')
    'Helo Wrd'
    """
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """
    Returns a cipher map given a keyword.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """
    Enciphers a message given a cipher map.
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """
    Deciphers a message given a cipher map.
    """
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """
    Handles I/O.
    """
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
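# Hedged demo (added, not in the original): typical round-trip usage.
#
#   >>> cmap = create_cipher_map("COLLEGE")
#   >>> encipher("HELLO WORLD", cmap)
#   'DGJJN WNRJE'
#   >>> decipher("DGJJN WNRJE", cmap)
#   'HELLO WORLD'
#
# The round trip holds whenever the generated map is a bijection, as it is
# for ordinary alphabetic keywords.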
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 82
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
def A_ ( self : Union[str, Any] ) -> int:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : List[str] ) -> Tuple:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ , lowerCamelCase__ : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : Any ) -> Optional[Any]:
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(UpperCAmelCase , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def A_ ( self : Dict ) -> Dict:
from elasticsearch import Elasticsearch
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : List[Any] = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase__ : int = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCamelCase__ : List[str] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Dict = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest(TestCase):
def A_ ( self : Any ) -> Dict:
import faiss
lowerCamelCase__ : Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase__ : int = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Any = 1
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = index.search(UpperCAmelCase )
self.assertRaises(UpperCAmelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase__ : str = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase__ , lowerCamelCase__ : Dict = index.search_batch(UpperCAmelCase )
self.assertRaises(UpperCAmelCase , index.search_batch , queries[0] )
lowerCamelCase__ : str = [scores[0] for scores in total_scores]
lowerCamelCase__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[Any]:
import faiss
lowerCamelCase__ : Any = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase__ : Tuple = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCAmelCase ):
lowerCamelCase__ : List[str] = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self : List[str] ) -> Optional[int]:
import faiss
lowerCamelCase__ : Optional[Any] = faiss.IndexFlat(5 )
lowerCamelCase__ : int = FaissIndex(custom_index=UpperCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self : Any ) -> Optional[int]:
import faiss
lowerCamelCase__ : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase__ : List[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ : List[str] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ , lowerCamelCase__ : str = index.search(UpperCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
import faiss
lowerCamelCase__ : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase__ : Optional[int] = 'index.faiss'
lowerCamelCase__ : Optional[Any] = F"""mock://{index_name}"""
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCamelCase__ : Tuple = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCamelCase__ : Optional[int] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Dict = 1
lowerCamelCase__ , lowerCamelCase__ : str = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def A_ ( self : Dict ) -> List[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : Any = Elasticsearch()
lowerCamelCase__ : Tuple = {'acknowledged': True}
lowerCamelCase__ : Tuple = ElasticSearchIndex(es_client=UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCamelCase__ : Optional[int] = 'foo'
lowerCamelCase__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = index.search(UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase__ : Any = 'foo'
lowerCamelCase__ : List[str] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : Tuple = index.search(UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase__ : List[str] = ['foo', 'bar', 'foobar']
lowerCamelCase__ : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : str = index.search_batch(UpperCAmelCase )
lowerCamelCase__ : List[str] = [scores[0] for scores in total_scores]
lowerCamelCase__ : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase )
# batched queries with timeout
lowerCamelCase__ : str = ['foo', 'bar', 'foobar']
lowerCamelCase__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = index.search_batch(UpperCAmelCase , request_timeout=30 )
lowerCamelCase__ : Optional[Any] = [scores[0] for scores in total_scores]
lowerCamelCase__ : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase )
| 50
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 363
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
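# Note (added): `_LazyModule` installs itself in `sys.modules`, so
# `processing_wav2vec2_with_lm` is imported only on first attribute access,
# e.g. `from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM`.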
| 276
| 0
|
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    """
    Calculates vegetation indices from red, green, blue, red-edge and NIR bands.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """
        Performs the calculation of the index with the values instantiated in the class.
        :param index: abbreviation of the index name to perform
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        # Atmospherically Resistant Vegetation Index 2
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        # Canopy Chlorophyll Content Index
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        # Chlorophyll vegetation index
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        # Green leaf index
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        # Normalized Difference Vegetation Index
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        # Blue-normalized difference vegetation index
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        # Green NDVI
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        # Green-Blue NDVI
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        # Green-Red NDVI
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        # Red-Blue NDVI
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        # Pan NDVI
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        # Adjusted transformed soil-adjusted VI
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        # Blue-wide dynamic range vegetation index
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        # Chlorophyll Index Green
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        # Chlorophyll Index RedEdge
        return (self.nir / self.redEdge) - 1

    def ci(self):
        # Coloration Index
        return (self.red - self.blue) / self.red

    def ctvi(self):
        # Corrected Transformed Vegetation Index
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        # Green Difference Vegetation Index
        return self.nir - self.green

    def evi(self):
        # Enhanced Vegetation Index
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        # Global Environment Monitoring Index
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        # Green Optimized Soil Adjusted Vegetation Index
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        # Green Soil Adjusted Vegetation Index
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        # Ideal vegetation index
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        # Infrared percentage vegetation index
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        # Intensity
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        # Ratio Vegetation Index
        return self.nir / self.red

    def mrvi(self):
        # Modified RVI
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        # Modified Soil Adjusted Vegetation Index
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        # Normalized Difference Green/Red
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        # Redness Index
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        # Saturation
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        # Shape Index
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        # Simple Ratio NIR/RED, Vegetation Index Number (VIN)
        return self.nir / self.red

    def tvi(self):
        # Transformed Vegetation Index
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 61
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name out of a given date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
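    # Added sanity check (sketch): 2024 is a leap year whose doomsday falls on
    # Thursday, so 2024-01-15 resolves to Monday.
    assert get_week_day(2024, 1, 15) == "Monday"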
| 282
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """
    Gives the distance between two points.
    """
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """
    Classifies the point using the KNN algorithm: pick the most common class
    among the k training points closest to the query point.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
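    # Added sketch: spot-check a few held-out points (the split is random, so
    # occasional misclassifications are expected).
    for point, label in zip(X_test[:3], y_test[:3]):
        print(classifier(X_train, y_train, classes, point), "expected:", classes[label])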
| 124
| 0
|
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b  # compare the tensor protos while ignoring their names

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicated initializers from an ONNX model and saves an optimized copy.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
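# Hedged usage sketch (added): "model.onnx" is an illustrative path. The
# helper writes "optimized_model.onnx" next to it and returns the new path.
if __name__ == "__main__":
    print(remove_dup_initializers("model.onnx"))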
| 23
|
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
_lowerCamelCase : Any = TypeVar('T')
class lowercase ( Generic[T]):
def __init__( self : Tuple , _lowerCamelCase : T ):
"""simple docstring"""
A_ : Union[str, Any] = data
A_ : List[Any] = self
A_ : Optional[Any] = 0
class lowercase ( Generic[T]):
def __init__( self : int ):
"""simple docstring"""
A_ : dict[T, DisjointSetTreeNode[T]] = {}
def a_ ( self : List[str] , _lowerCamelCase : T ):
"""simple docstring"""
A_ : List[str] = DisjointSetTreeNode(_lowerCamelCase )
def a_ ( self : Dict , _lowerCamelCase : T ):
"""simple docstring"""
A_ : Any = self.map[data]
if elem_ref != elem_ref.parent:
A_ : Any = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def a_ ( self : Union[str, Any] , _lowerCamelCase : DisjointSetTreeNode[T] , _lowerCamelCase : DisjointSetTreeNode[T] ):
"""simple docstring"""
if nodea.rank > nodea.rank:
A_ : List[str] = nodea
else:
A_ : Optional[Any] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def a_ ( self : Optional[Any] , _lowerCamelCase : T , _lowerCamelCase : T ):
"""simple docstring"""
self.link(self.find_set(_lowerCamelCase ) , self.find_set(_lowerCamelCase ) )
class lowercase ( Generic[T]):
def __init__( self : Tuple ):
"""simple docstring"""
A_ : dict[T, dict[T, int]] = {}
def a_ ( self : List[Any] , _lowerCamelCase : T ):
"""simple docstring"""
if node not in self.connections:
A_ : Tuple = {}
def a_ ( self : Optional[Any] , _lowerCamelCase : T , _lowerCamelCase : T , _lowerCamelCase : int ):
"""simple docstring"""
self.add_node(_lowerCamelCase )
self.add_node(_lowerCamelCase )
A_ : int = weight
A_ : Dict = weight
def a_ ( self : Any ):
"""simple docstring"""
A_ : Tuple = []
A_ : Tuple = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda _lowerCamelCase : _lowerCamelCase[2] )
# creating the disjoint set
A_ : Optional[Any] = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(_lowerCamelCase )
# MST generation
A_ : Any = 0
A_ : Optional[int] = 0
A_ : Union[str, Any] = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
A_ , A_ , A_ : int = edges[index]
index += 1
A_ : Tuple = disjoint_set.find_set(_lowerCamelCase )
A_ : int = disjoint_set.find_set(_lowerCamelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
disjoint_set.union(_lowerCamelCase , _lowerCamelCase )
return graph
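# Usage sketch (illustrative). The class above implements Kruskal's algorithm
# on top of a union-find with union by rank and path compression. The method
# names below follow the conventional add_edge / kruskal API of the
# unobfuscated original; in the obfuscated source every method is named `a_`.
graph = GraphUndirectedWeighted[str]()
graph.add_edge("a", "b", 1)
graph.add_edge("b", "c", 2)
graph.add_edge("a", "c", 3)  # heaviest edge; it closes a cycle and is excluded
mst = graph.kruskal()  # the MST keeps edges (a, b, 1) and (b, c, 2)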
| 167
| 0
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = XLNetTokenizer
lowercase__ = XLNetTokenizerFast
lowercase__ = True
lowercase__ = True
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase : Dict = XLNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = '<s>'
_UpperCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) ,lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<unk>' )
self.assertEqual(vocab_keys[1] ,'<s>' )
self.assertEqual(vocab_keys[-1] ,'<eod>' )
self.assertEqual(len(lowerCamelCase__ ) ,1006 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1000 )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : Dict = XLNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
_UpperCamelCase : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCamelCase__ ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[285, 46, 10, 170, 382] )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase__ ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_UpperCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = XLNetTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase__ ,[
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] ,)
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['▁he', 'll', 'o'] )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : int = XLNetTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ )
_UpperCamelCase : List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase__ ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] ,)
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = XLNetTokenizer.from_pretrained('xlnet-base-cased' )
_UpperCamelCase : str = tokenizer.encode('sequence builders' ,add_special_tokens=lowerCamelCase__ )
_UpperCamelCase : List[Any] = tokenizer.encode('multi-sequence build' ,add_special_tokens=lowerCamelCase__ )
_UpperCamelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
_UpperCamelCase : str = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ,lowerCamelCase__ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
# fmt: off
_UpperCamelCase : Tuple = {'input_ids': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ ,model_name='xlnet-base-cased' ,revision='c841166438c31ec7ca9a106dee7bb312b73ae511' ,)
| 236
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
snake_case_ : List[Any] = None
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : Dict = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
snake_case_ : List[str] = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
snake_case_ : str = {
'facebook/nllb-large-en-ro': 1024,
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
snake_case_ : Optional[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowercase__ ( lowercase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = NllbTokenizer
lowercase__ = []
lowercase__ = []
def __init__( self : List[Any] ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Dict=None ,lowerCamelCase__ : List[Any]="<s>" ,lowerCamelCase__ : Dict="</s>" ,lowerCamelCase__ : List[Any]="</s>" ,lowerCamelCase__ : Union[str, Any]="<s>" ,lowerCamelCase__ : List[Any]="<unk>" ,lowerCamelCase__ : Any="<pad>" ,lowerCamelCase__ : Optional[Any]="<mask>" ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : str=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : Union[str, Any]=False ,**lowerCamelCase__ : Optional[Any] ,):
'''simple docstring'''
# Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCamelCase : Optional[int] = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else mask_token
_UpperCamelCase : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,src_lang=lowerCamelCase__ ,tgt_lang=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,legacy_behaviour=lowerCamelCase__ ,**lowerCamelCase__ ,)
_UpperCamelCase : int = vocab_file
_UpperCamelCase : int = False if not self.vocab_file else True
_UpperCamelCase : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_UpperCamelCase : List[str] = {
lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_UpperCamelCase : List[str] = src_lang if src_lang is not None else 'eng_Latn'
_UpperCamelCase : int = self.convert_tokens_to_ids(self._src_lang )
_UpperCamelCase : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
_UpperCamelCase : Dict = [self.sep_token_id]
_UpperCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] ,lowerCamelCase__ : Optional[str] ,**lowerCamelCase__ : Dict ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_UpperCamelCase : Tuple = src_lang
_UpperCamelCase : Optional[Any] = self(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : Tuple = self.convert_tokens_to_ids(lowerCamelCase__ )
_UpperCamelCase : str = tgt_lang_id
return inputs
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str = "eng_Latn" ,lowerCamelCase__ : Optional[List[str]] = None ,lowerCamelCase__ : str = "fra_Latn" ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
_UpperCamelCase : Tuple = src_lang
_UpperCamelCase : List[str] = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : List[Any] ):
'''simple docstring'''
_UpperCamelCase : int = self.convert_tokens_to_ids(lowerCamelCase__ )
if self.legacy_behaviour:
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : int = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase : List[Any] = [self.cur_lang_code]
_UpperCamelCase : List[Any] = [self.eos_token_id]
_UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCamelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : Any = self.convert_tokens_to_ids(lowerCamelCase__ )
if self.legacy_behaviour:
_UpperCamelCase : Tuple = []
_UpperCamelCase : str = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase : Tuple = [self.cur_lang_code]
_UpperCamelCase : Optional[Any] = [self.eos_token_id]
_UpperCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCamelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_UpperCamelCase : List[Any] = os.path.join(
lowerCamelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file ,lowerCamelCase__ )
return (out_vocab_file,)
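# Usage sketch (illustrative, written against the public transformers API
# rather than the obfuscated method names above). The fast NLLB tokenizer
# wraps source text with the source-language code and </s>, and switches to
# the target-language code when encoding targets.
from transformers import NllbTokenizerFast

tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
batch = tok("Hello world", return_tensors="pt")  # ids begin with the eng_Latn code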
| 236
| 1
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self: Optional[int] ) -> int:
snake_case_ :Optional[int] = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
snake_case_ :List[Any] = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
snake_case_ :Dict = """The dog is cute and lives in the garden house"""
snake_case_ :Dict = jnp.array([tokenizer.encode(snake_case )] )
snake_case_ :Optional[Any] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
snake_case_ :Optional[Any] = jnp.array(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
snake_case_ :List[Any] = model(snake_case )["""last_hidden_state"""]
self.assertEqual(output.shape , snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case , atol=1E-3 ) )
| 66
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ : int = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344
| 0
|
import math
import qiskit
def __UpperCamelCase ( lowercase__ : int = 1 , lowercase__ : int = 1 , lowercase__ : int = 1 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
if (
isinstance(lowercase__ , str )
or isinstance(lowercase__ , str )
or isinstance(lowercase__ , str )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(lowercase__ ) != input_a)
or (math.floor(lowercase__ ) != input_a)
or (math.floor(lowercase__ ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
lowerCAmelCase_ : Union[str, Any] = qiskit.QuantumRegister(4 , """qr""" )
lowerCAmelCase_ : Optional[Any] = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
lowerCAmelCase_ : str = [input_a, input_a, carry_in]
lowerCAmelCase_ : Optional[Any] = qiskit.QuantumCircuit(lowercase__ , lowercase__ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowercase__ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowercase__ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowercase__ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowercase__ ) # measure the last two qubits
lowerCAmelCase_ : List[Any] = qiskit.Aer.get_backend("""aer_simulator""" )
lowerCAmelCase_ : Optional[Any] = qiskit.execute(lowercase__ , lowercase__ , shots=1000 )
return job.result().get_counts(lowercase__ )
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 28
|
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase ( lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : Tuple="attention" ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Any = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
lowerCAmelCase_ : Optional[Any] = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
lowerCAmelCase_ : str = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
lowerCAmelCase_ : Tuple = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : Dict , lowercase__ : List[str] , lowercase__ : str=False ) -> int:
'''simple docstring'''
if split_mlp_wi:
lowerCAmelCase_ : List[Any] = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
lowerCAmelCase_ : List[Any] = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
lowerCAmelCase_ : int = (wi_a, wi_a)
else:
lowerCAmelCase_ : str = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
lowerCAmelCase_ : int = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def __UpperCamelCase ( lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Optional[Any] , lowercase__ : Tuple ) -> int:
'''simple docstring'''
return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def __UpperCamelCase ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = traverse_util.flatten_dict(variables["""target"""] )
lowerCAmelCase_ : List[Any] = {"""/""".join(lowercase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowerCAmelCase_ : Dict = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowercase__ )
lowerCAmelCase_ : Optional[Any] = collections.OrderedDict()
# Shared embeddings.
lowerCAmelCase_ : Tuple = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ : Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" )
lowerCAmelCase_ : Optional[int] = layer_norm
lowerCAmelCase_ : Optional[int] = k.T
lowerCAmelCase_ : List[Any] = o.T
lowerCAmelCase_ : Union[str, Any] = q.T
lowerCAmelCase_ : Any = v.T
# Block i, layer 1 (MLP).
lowerCAmelCase_ : Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ )
lowerCAmelCase_ : str = layer_norm
if split_mlp_wi:
lowerCAmelCase_ : Optional[int] = wi[0].T
lowerCAmelCase_ : Optional[Any] = wi[1].T
else:
lowerCAmelCase_ : int = wi.T
lowerCAmelCase_ : Optional[Any] = wo.T
lowerCAmelCase_ : Tuple = old[
"""encoder/relpos_bias/rel_embedding"""
].T
lowerCAmelCase_ : str = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ : int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" )
lowerCAmelCase_ : Dict = layer_norm
lowerCAmelCase_ : Union[str, Any] = k.T
lowerCAmelCase_ : Union[str, Any] = o.T
lowerCAmelCase_ : Any = q.T
lowerCAmelCase_ : Tuple = v.T
# Block i, layer 1 (Cross Attention).
lowerCAmelCase_ : Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" )
lowerCAmelCase_ : Optional[int] = layer_norm
lowerCAmelCase_ : Any = k.T
lowerCAmelCase_ : Any = o.T
lowerCAmelCase_ : Optional[int] = q.T
lowerCAmelCase_ : Dict = v.T
# Block i, layer 2 (MLP).
lowerCAmelCase_ : List[str] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ : int = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ )
lowerCAmelCase_ : Any = layer_norm
if split_mlp_wi:
lowerCAmelCase_ : List[str] = wi[0].T
lowerCAmelCase_ : List[Any] = wi[1].T
else:
lowerCAmelCase_ : Optional[Any] = wi.T
lowerCAmelCase_ : str = wo.T
lowerCAmelCase_ : int = old["""decoder/decoder_norm/scale"""]
lowerCAmelCase_ : Union[str, Any] = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCAmelCase_ : Optional[Any] = old["""decoder/logits_dense/kernel"""].T
return new
def __UpperCamelCase ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ : List[Any] = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ : Union[str, Any] = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowerCAmelCase_ : List[str] = state_dict["""shared.weight"""]
return state_dict
def __UpperCamelCase ( lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : List[str] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Tuple = checkpoints.load_t5x_checkpoint(lowercase__ )
lowerCAmelCase_ : List[str] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ )
lowerCAmelCase_ : List[str] = make_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ , strict=lowercase__ )
def __UpperCamelCase ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : List[Any] , lowercase__ : bool = False ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Any = T5Config.from_json_file(lowercase__ )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowerCAmelCase_ : Optional[int] = T5EncoderModel(lowercase__ )
else:
lowerCAmelCase_ : Dict = T5ForConditionalGeneration(lowercase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowercase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase__ )
print("""Done""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Whether the checkpoint is an encoder-only model.', default=False
)
__UpperCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
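# Example invocation (the script name and paths are hypothetical; the flag
# names come from the argparse definitions above):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --is_encoder_only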
| 28
| 1
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = BloomTokenizerFast
_SCREAMING_SNAKE_CASE = BloomTokenizerFast
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = """tokenizer_file"""
_SCREAMING_SNAKE_CASE = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def A ( self : List[str] ):
"""simple docstring"""
super().setUp()
UpperCamelCase = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : List[str] , **UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
UpperCamelCase = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
UpperCamelCase = tokenizer.batch_encode_plus(UpperCamelCase__ )['input_ids']
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : int , UpperCamelCase__ : Tuple=6 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
UpperCamelCase = 'This is a simple input'
UpperCamelCase = ['This is a simple input 1', 'This is a simple input 2']
UpperCamelCase = ('This is a simple input', 'This is a pair')
UpperCamelCase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__ )
tokenizer_r.encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__ )
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__ )
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__ )
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__ )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
UpperCamelCase = None # Hotfixing padding = None
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
# Simple input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
# Simple input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' , )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' )
# Pair input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding='max_length' , )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = load_dataset('xnli' , 'all_languages' , split='test' , streaming=UpperCamelCase__ )
UpperCamelCase = next(iter(UpperCamelCase__ ) )['premise'] # pick up one data
UpperCamelCase = list(sample_data.values() )
UpperCamelCase = list(map(tokenizer.encode , UpperCamelCase__ ) )
UpperCamelCase = [tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 28
|
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
A__: Union[str, Any] = input('''Enter image url: ''').strip()
print(F"Downloading image from {url} ...")
A__: Tuple = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
A__: Union[str, Any] = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
A__: List[Any] = requests.get(image_url).content
A__: List[str] = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 276
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
return number | (1 << position)
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
return number & ~(1 << position)
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
return number ^ (1 << position)
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
return ((number >> position) & 1) == 1
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
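# Worked examples (added). The names follow the conventional order of the
# definitions above (set_bit, clear_bit, flip_bit, is_bit_set, get_bit); the
# obfuscated source calls each one _SCREAMING_SNAKE_CASE.
#
#   set_bit(0b1100, 1)     -> 14 (0b1110)
#   clear_bit(0b1110, 1)   -> 12 (0b1100)
#   flip_bit(0b1100, 2)    -> 8  (0b1000)
#   is_bit_set(0b1010, 3)  -> True
#   get_bit(0b1010, 1)     -> 1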
| 230
|
"""simple docstring"""
SCREAMING_SNAKE_CASE = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
SCREAMING_SNAKE_CASE = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
SCREAMING_SNAKE_CASE = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
SCREAMING_SNAKE_CASE = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
SCREAMING_SNAKE_CASE = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
SCREAMING_SNAKE_CASE = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
SCREAMING_SNAKE_CASE = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
SCREAMING_SNAKE_CASE = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 230
| 1
|
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : int =get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class _A ( UpperCamelCase__ , unittest.TestCase ):
snake_case__ : Any = PegasusTokenizer
snake_case__ : Optional[int] = PegasusTokenizerFast
snake_case__ : str = True
snake_case__ : Tuple = True
def A__ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = PegasusTokenizer(__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ ( self ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return ("This is a test", "This is a test")
def A__ ( self ):
"""simple docstring"""
lowercase = """</s>"""
lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(__lowerCAmelCase ) , 1103 )
def A__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def A__ ( self ):
"""simple docstring"""
lowercase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowercase = rust_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
lowercase = py_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowercase = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowercase = tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowercase = """To ensure a smooth flow of bank resolutions."""
lowercase = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowercase = tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = ["""This is going to be way too long.""" * 150, """short example"""]
lowercase = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase = self._large_tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""" )
lowercase = self._large_tokenizer(
text_target=__lowerCAmelCase , max_length=5 , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__lowerCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class _A ( UpperCamelCase__ , unittest.TestCase ):
snake_case__ : List[Any] = PegasusTokenizer
snake_case__ : Dict = PegasusTokenizerFast
snake_case__ : Optional[int] = True
snake_case__ : List[str] = True
def A__ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = PegasusTokenizer(__lowerCAmelCase , offset=0 , mask_token_sent=__lowerCAmelCase , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ ( self ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return ("This is a test", "This is a test")
def A__ ( self ):
"""simple docstring"""
lowercase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowercase = rust_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
lowercase = py_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = ["""This is going to be way too long.""" * 1000, """short example"""]
lowercase = ["""not super long but more than 5 tokens""", """tiny"""]
lowercase = self._large_tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""" )
lowercase = self._large_tokenizer(
text_target=__lowerCAmelCase , max_length=5 , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__lowerCAmelCase ) == 2 # input_ids, attention_mask.
def A__ ( self ):
"""simple docstring"""
lowercase = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowercase = self._large_tokenizer(__lowerCAmelCase ).input_ids
self.assertListEqual(
__lowerCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 197
|
def SCREAMING_SNAKE_CASE__ ( ) -> list[list[int]]:
return [list(range(1000 - i ,-1000 - i ,-1 ) ) for i in range(1000 )]
lowerCamelCase : List[Any] = generate_large_matrix()
lowerCamelCase : Optional[int] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> None:
assert all(row == sorted(lowercase ,reverse=lowercase ) for row in grid )
assert all(list(lowercase ) == sorted(lowercase ,reverse=lowercase ) for col in zip(*lowercase ) )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Tuple = 0
snake_case : List[Any] = len(lowercase ) - 1
# Edge cases: an empty array, or an array whose values are all negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
snake_case : Tuple = (left + right) // 2
snake_case : Dict = array[mid]
# The first negative is found where num is negative and the preceding element is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
snake_case : List[Any] = mid + 1
else:
snake_case : str = mid - 1
# No negative numbers, so return the length of the array (one past the last index).
return len(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Union[str, Any] = 0
snake_case : Dict = len(grid[0] )
for i in range(len(lowercase ) ):
snake_case : Tuple = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase ) * len(grid[0] )) - total
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return len([number for row in grid for number in row if number < 0] )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Dict = 0
for row in grid:
for i, number in enumerate(lowercase ):
if number < 0:
total += len(lowercase ) - i
break
return total
def SCREAMING_SNAKE_CASE__ ( ) -> None:
from timeit import timeit
print("""Running benchmarks""" )
snake_case : List[Any] = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
snake_case : int = timeit(f"""{func}(grid=grid)""" ,setup=lowercase ,number=500 )
print(f"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
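# Worked example (added). For the sorted test grid
# [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
# the per-row binary search finds the first negative index at 3, 3, 2 and 0,
# so the negative count is 4 * 4 - (3 + 3 + 2 + 0) = 8. Because both rows and
# columns are sorted in decreasing order, that boundary can only move left
# from one row to the next, which is why slicing with `grid[i][:bound]` above
# is safe.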
| 124
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( lowerCamelCase__):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 362
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : str = 'nllb-moe'
A_ : Optional[Any] = ['past_key_values']
A_ : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _SCREAMING_SNAKE_CASE=12_81_12 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=40_96 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=40_96 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="float32" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1_28 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE="all" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=0.2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = vocab_size
__lowerCAmelCase : str = max_position_embeddings
__lowerCAmelCase : Dict = d_model
__lowerCAmelCase : Tuple = encoder_ffn_dim
__lowerCAmelCase : Optional[Any] = encoder_layers
__lowerCAmelCase : Any = encoder_attention_heads
__lowerCAmelCase : Tuple = decoder_ffn_dim
__lowerCAmelCase : Dict = decoder_layers
__lowerCAmelCase : str = decoder_attention_heads
__lowerCAmelCase : str = dropout
__lowerCAmelCase : List[str] = attention_dropout
__lowerCAmelCase : Optional[int] = activation_dropout
__lowerCAmelCase : List[Any] = activation_function
__lowerCAmelCase : List[str] = init_std
__lowerCAmelCase : Union[str, Any] = encoder_layerdrop
__lowerCAmelCase : List[Any] = decoder_layerdrop
__lowerCAmelCase : Optional[int] = use_cache
__lowerCAmelCase : Optional[Any] = encoder_layers
__lowerCAmelCase : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase : Union[str, Any] = router_z_loss_coef
__lowerCAmelCase : Optional[Any] = router_aux_loss_coef
__lowerCAmelCase : int = decoder_sparse_step
__lowerCAmelCase : str = encoder_sparse_step
__lowerCAmelCase : Tuple = num_experts
__lowerCAmelCase : Dict = expert_capacity
__lowerCAmelCase : Union[str, Any] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
__lowerCAmelCase : Union[str, Any] = router_dtype
__lowerCAmelCase : Any = router_ignore_padding_tokens
__lowerCAmelCase : str = batch_prioritized_routing
__lowerCAmelCase : Tuple = second_expert_policy
__lowerCAmelCase : List[str] = normalize_router_prob_before_dropping
__lowerCAmelCase : Dict = moe_eval_capacity_token_fraction
__lowerCAmelCase : Union[str, Any] = moe_token_dropout
__lowerCAmelCase : List[Any] = output_router_logits
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
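# Example (illustrative): NllbMoeConfig(num_experts=8, expert_capacity=32) builds a
# config whose sparse MoE layers occur every `encoder_sparse_step`/`decoder_sparse_step`
# layers and route each token to experts drawn from a pool of 8.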
import unittest

from transformers import DonutProcessor

DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"

class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
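    def test_token2json_single_field(self):
        # Extra sanity check (not in the original test file; behavior assumed from
        # the full-sequence test above): a single tagged field parses to a one-entry dict.
        self.assertDictEqual(self.processor.token2json("<s_name>John</s_name>"), {"name": "John"})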
import os
from math import log10

def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the `base,exponent` pair with the
    greatest value, comparing exponent * log10(base) instead of the full powers."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
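# Worked example (illustrative): for the pair 2,11 versus 3,7,
# 11 * log10(2) ~= 3.311 while 7 * log10(3) ~= 3.340, so 3**7 (= 2187)
# exceeds 2**11 (= 2048) without ever evaluating either power.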
if __name__ == "__main__":
print(solution())
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowercase_ = TypeVar("T")
def lowercase ( lowerCAmelCase__ : int ) -> int:
return (position - 1) // 2
def lowercase ( lowerCAmelCase__ : int ) -> int:
return (2 * position) + 1
def lowercase ( lowerCAmelCase__ : int ) -> int:
return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap plus a position map."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected, weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
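if __name__ == "__main__":
    # Usage sketch (not in the original module): a triangle graph whose spanning
    # tree keeps the two cheapest edges.
    demo_graph = GraphUndirectedWeighted[str]()
    demo_graph.add_edge("a", "b", 3)
    demo_graph.add_edge("b", "c", 10)
    demo_graph.add_edge("c", "a", 15)
    dist, parent = prims_algo(demo_graph)
    print(dist)    # {'a': 0, 'b': 3, 'c': 13}
    print(parent)  # {'a': None, 'b': 'a', 'c': 'b'}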
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}

class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    """Rename a single key of `state_dict` from `old` to `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Move the backbone keys under `backbone.conv_encoder.model`."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split the fused in-projection matrices into separate q/k/v projections."""
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize so the longest side is 800px (detection) or 1000px (structure recognition)."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
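# Example (illustrative): a 1200x800 page with the detection checkpoint has
# target_max_size 800, so scale = 800/1200 and the output is 800x533.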
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original checkpoint's weights to our Table Transformer structure."""
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1_000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
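# Example (illustrative): after a run with `--output_dir /tmp/run`, get_results("/tmp/run")
# reads /tmp/run/eval_results.json, e.g. {"eval_accuracy": 0.75, "eval_loss": 0.6}.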
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)

class MobileNetV2ImageProcessor(BaseImageProcessor):
    """Image processor with MobileNetV2-style defaults (shortest edge 256, center crop 224)."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
def solution(min_total: int = 10**12) -> int:
    """Return the number of blue discs in the first arrangement with more than
    `min_total` discs in total where P(two blue) is exactly 1/2."""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
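# Sanity check (worked by hand from the recurrence above): the first arrangement
# with more than 21 discs in total is 85 blue out of 120, so solution(21) == 85.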
if __name__ == "__main__":
print(f"""{solution() = }""")
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name of a given date using the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
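    # Illustrative sanity check: 1 January 2023 fell on a Sunday.
    print(get_week_day(2023, 1, 1))  # Sunday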
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ = TypeVar('''KEY''')
A_ = TypeVar('''VAL''')
@dataclass(frozen=UpperCamelCase , slots=UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
snake_case_ = 42
snake_case_ = 42
class __SCREAMING_SNAKE_CASE ( _Item ):
def __init__( self : Optional[Any] ):
'''simple docstring'''
super().__init__(snake_case , snake_case )
def __bool__( self : str ):
'''simple docstring'''
return False
A_ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
def __init__( self : Any , snake_case : int = 8 , snake_case : float = 0.75 ):
'''simple docstring'''
A__ : List[Any] = initial_block_size
A__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
A__ : Dict = capacity_factor
A__ : Dict = 0
def _UpperCamelCase ( self : Optional[int] , snake_case : KEY ):
'''simple docstring'''
return hash(snake_case ) % len(self._buckets )
def _UpperCamelCase ( self : Tuple , snake_case : int ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Any , snake_case : int , snake_case : KEY , snake_case : VAL ):
'''simple docstring'''
A__ : int = self._buckets[ind]
if not stored:
A__ : Optional[Any] = _Item(snake_case , snake_case )
self._len += 1
return True
elif stored.key == key:
A__ : int = _Item(snake_case , snake_case )
return True
else:
return False
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
A__ : List[Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : Any , snake_case : int ):
'''simple docstring'''
A__ : int = self._buckets
A__ : Dict = [None] * new_size
A__ : Union[str, Any] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Optional[Any] , snake_case : KEY ):
'''simple docstring'''
A__ : Optional[Any] = self._get_bucket_index(snake_case )
for _ in range(len(self._buckets ) ):
yield ind
A__ : Tuple = self._get_next_ind(snake_case )
def _UpperCamelCase ( self : Dict , snake_case : KEY , snake_case : VAL ):
'''simple docstring'''
for ind in self._iterate_buckets(snake_case ):
if self._try_set(snake_case , snake_case , snake_case ):
break
def __setitem__( self : str , snake_case : KEY , snake_case : VAL ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(snake_case , snake_case )
def __delitem__( self : Optional[int] , snake_case : KEY ):
'''simple docstring'''
for ind in self._iterate_buckets(snake_case ):
A__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(snake_case )
if item is _deleted:
continue
if item.key == key:
A__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , snake_case : KEY ):
'''simple docstring'''
for ind in self._iterate_buckets(snake_case ):
A__ : List[str] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(snake_case )
def __len__( self : Any ):
'''simple docstring'''
return self._len
def __iter__( self : Tuple ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
A__ : int = """ ,""".join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
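if __name__ == "__main__":
    # Usage sketch (not in the original module): insert, overwrite and lazily delete.
    hash_map: HashMap[str, int] = HashMap()
    hash_map["key_a"] = 1
    hash_map["key_b"] = 2
    del hash_map["key_a"]
    print(len(hash_map), hash_map["key_b"])  # -> 1 2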
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''spiece.model'''}
A_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
A_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
A_ = 0
A_ = 1
A_ = 2
A_ = 3
A_ = 4
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = 'left'
def __init__( self : Dict , snake_case : int , snake_case : List[Any]=False , snake_case : List[str]=True , snake_case : Dict=False , snake_case : Optional[Any]="<s>" , snake_case : List[str]="</s>" , snake_case : Tuple="<unk>" , snake_case : Tuple="<sep>" , snake_case : Union[str, Any]="<pad>" , snake_case : Dict="<cls>" , snake_case : Optional[Any]="<mask>" , snake_case : Optional[int]=["<eop>", "<eod>"] , snake_case : Optional[Dict[str, Any]] = None , **snake_case : Dict , ):
'''simple docstring'''
A__ : Optional[int] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
A__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
A__ : str = 3
A__ : str = do_lower_case
A__ : Optional[Any] = remove_space
A__ : List[Any] = keep_accents
A__ : Union[str, Any] = vocab_file
A__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : int = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
'''simple docstring'''
A__ : int = self.__dict__.copy()
A__ : int = None
return state
def __setstate__( self : Tuple , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ : Optional[int] = {}
A__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] ):
'''simple docstring'''
if self.remove_space:
A__ : Optional[Any] = """ """.join(inputs.strip().split() )
else:
A__ : Dict = inputs
A__ : str = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
A__ : Any = unicodedata.normalize("""NFKD""" , snake_case )
A__ : Optional[int] = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] )
if self.do_lower_case:
A__ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str ):
'''simple docstring'''
A__ : Dict = self.preprocess_text(snake_case )
A__ : Dict = self.sp_model.encode(snake_case , out_type=snake_case )
A__ : Optional[int] = []
for piece in pieces:
if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ : int = cur_pieces[1:]
else:
A__ : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case )
else:
new_pieces.append(snake_case )
return new_pieces
def _UpperCamelCase ( self : List[str] , snake_case : Tuple ):
'''simple docstring'''
return self.sp_model.PieceToId(snake_case )
def _UpperCamelCase ( self : List[str] , snake_case : Any ):
'''simple docstring'''
return self.sp_model.IdToPiece(snake_case )
def _UpperCamelCase ( self : Optional[int] , snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = """""".join(snake_case ).replace(snake_case , """ """ ).strip()
return out_string
def _UpperCamelCase ( self : int , snake_case : List[int] , snake_case : bool = False , snake_case : bool = None , snake_case : bool = True , **snake_case : Union[str, Any] , ):
'''simple docstring'''
A__ : List[str] = kwargs.pop("""use_source_tokenizer""" , snake_case )
A__ : Any = self.convert_ids_to_tokens(snake_case , skip_special_tokens=snake_case )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A__ : Any = []
A__ : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
A__ : str = []
sub_texts.append(snake_case )
else:
current_sub_text.append(snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A__ : Dict = """""".join(snake_case )
A__ : int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A__ : Tuple = self.clean_up_tokenization(snake_case )
return clean_text
else:
return text
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Tuple = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCamelCase ( self : Dict , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is not None:
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1, 1]
return ([0] * len(snake_case )) + [1, 1]
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Any = [self.sep_token_id]
A__ : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCamelCase ( self : Optional[Any] , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ : List[Any] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , """wb""" ) as fi:
A__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = RobertaPreLayerNormConfig.from_pretrained(
snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) )
A = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith('roberta.' ):
A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
A = tensor_value
A = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ )
model.save_pretrained(snake_case__ )
# convert tokenizer
A = AutoTokenizer.from_pretrained(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
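# Example invocation (illustrative; the script file name is assumed):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_dump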
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 74
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
    if "model" in orig_key:
        orig_key = orig_key.replace('model.' , '' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('norm2' , 'output.LayerNorm' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('norm' , 'LayerNorm' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('.' )[0].split('_' )[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}" , f"encoder.layer.{layer_num}" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('mha.attn' , 'attention.self' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('mha' , 'attention' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('W_q' , 'self.query' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('W_k' , 'self.key' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('W_v' , 'self.value' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('ff1' , 'intermediate.dense' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('ff2' , 'output.dense' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('ff' , 'output.dense' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('mlm' , 'cls.predictions.transform' )
    if "cls" not in orig_key:
        orig_key = 'yoso.' + orig_key
    return orig_key
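# Illustrative mapping, assuming original YOSO checkpoint keys of this shape:
#   "model.transformer_0.mha.W_q" -> "yoso.encoder.layer.0.attention.self.query"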
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
    orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path , map_location='cpu' )['model_state_dict']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 182
| 0
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
'''simple docstring'''
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : int, lowerCamelCase : Dict, lowerCamelCase : Any, lowerCamelCase : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : List[Any], lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], )-> List[str]:
lowerCamelCase__ : Tuple =BioGptForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCamelCase__ : Optional[int] =model(lowerCamelCase__, attention_mask=lowerCamelCase__, token_type_ids=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : Tuple, *lowerCamelCase : Optional[Any] )-> Optional[int]:
lowerCamelCase__ : int =BioGptModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# create attention mask
lowerCamelCase__ : Union[str, Any] =torch.ones(input_ids.shape, dtype=torch.long, device=lowerCamelCase__ )
lowerCamelCase__ : str =self.seq_length // 2
lowerCamelCase__ : Dict =0
# first forward pass
lowerCamelCase__ : List[str] =model(lowerCamelCase__, attention_mask=lowerCamelCase__ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 1), config.vocab_size )
# change a random masked slice from input_ids
lowerCamelCase__ : List[Any] =ids_tensor((1,), lowerCamelCase__ ).item() + 1
lowerCamelCase__ : List[Any] =ids_tensor((self.batch_size, 1), config.vocab_size ).squeeze(-1 )
lowerCamelCase__ : Dict =random_other_next_tokens
# append to next input_ids and attn_mask
lowerCamelCase__ : List[str] =torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : str =torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=lowerCamelCase__ )], dim=1, )
# get two different outputs
lowerCamelCase__ : Tuple =model(lowerCamelCase__, attention_mask=lowerCamelCase__ )['''last_hidden_state''']
lowerCamelCase__ : Optional[int] =model(lowerCamelCase__, past_key_values=lowerCamelCase__, attention_mask=lowerCamelCase__ )['''last_hidden_state''']
# select random slice
lowerCamelCase__ : Union[str, Any] =ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : int =output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1E-3 ) )
def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int], lowerCamelCase : Dict, *lowerCamelCase : str )-> int:
lowerCamelCase__ : Optional[int] =BioGptModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
lowerCamelCase__ : Any =torch.ones(input_ids.shape, dtype=torch.long, device=lowerCamelCase__ )
# first forward pass
lowerCamelCase__ : str =model(lowerCamelCase__, attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__ )
lowerCamelCase__ : List[str] =outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : int =ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : Optional[Any] =ids_tensor((self.batch_size, 3), 2 )
# append to next input_ids and
lowerCamelCase__ : str =torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Any =torch.cat([attention_mask, next_attn_mask], dim=-1 )
lowerCamelCase__ : int =model(lowerCamelCase__, attention_mask=lowerCamelCase__ )['''last_hidden_state''']
lowerCamelCase__ : int =model(lowerCamelCase__, attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__ )[
'''last_hidden_state'''
]
# select random slice
lowerCamelCase__ : int =ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Any =output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1E-3 ) )
def snake_case ( self : List[Any], lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Tuple, lowerCamelCase : Dict, lowerCamelCase : Any, *lowerCamelCase : List[str], lowerCamelCase : Tuple=False )-> Optional[Any]:
lowerCamelCase__ : List[str] =BioGptForCausalLM(lowerCamelCase__ )
model.to(lowerCamelCase__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCamelCase__ : List[str] =model(lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case ( self : List[str], lowerCamelCase : Any, *lowerCamelCase : List[Any] )-> str:
lowerCamelCase__ : Tuple =BioGptModel(lowerCamelCase__ )
lowerCamelCase__ : List[Any] =model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ), 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ), 0.01 )
def snake_case ( self : Tuple, lowerCamelCase : int, lowerCamelCase : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : Any, lowerCamelCase : str, *lowerCamelCase : Optional[Any] )-> int:
lowerCamelCase__ : List[str] =self.num_labels
lowerCamelCase__ : List[Any] =BioGptForTokenClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCamelCase__ : str =model(lowerCamelCase__, attention_mask=lowerCamelCase__, token_type_ids=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp( self ):
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_biogpt_model_att_mask_past( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs )
    def test_biogpt_gradient_checkpointing( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True )
    def test_biogpt_model_past_with_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs )
    def test_biogpt_weight_initialization( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs )
    def test_biogpt_token_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs )
@slow
    def test_batch_generation( self ):
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        model.to(torch_device )
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        tokenizer.padding_side = 'left'
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]
        inputs = tokenizer(sentences, return_tensors='pt', padding=True )
        input_ids = inputs['input_ids'].to(torch_device )
        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs['attention_mask'].to(torch_device ), )
        inputs_non_padded = tokenizer(sentences[0], return_tensors='pt' ).input_ids.to(torch_device )
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors='pt' ).input_ids.to(torch_device )
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True )
        expected_output_sentence = [
            'Hello, my dog is a little bit bigger than a little bit.',
            'Today, I have a good idea of how to use the information',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence )
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence] )
@slow
    def test_model_from_pretrained( self ):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_biogpt_sequence_classification_model( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_biogpt_sequence_classification_model_for_multi_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_biogpt( self ):
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]] )
        output = model(input_ids )[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4 ) )
@slow
    def test_biogpt_generation( self ):
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        model.to(torch_device )
        torch.manual_seed(0 )
        tokenized = tokenizer('COVID-19 is', return_tensors='pt' ).to(torch_device )
        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True, )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
            ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
            ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
            ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
            ' more than 800,000 deaths.'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR )
| 357
|
"""simple docstring"""
def nand_gate( input_1 : int , input_2 : int ):
    """simple docstring"""
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate( ):
    """simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 272
| 0
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
'''simple docstring'''
    def test_transform_and_reverse( self ):
        """simple docstring"""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        inp = tokenizer("""This is me""" , return_tensors="""pt""" )
        model = model.to_bettertransformer()
        self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )
        model = model.reverse_bettertransformer()
        self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output , output_from_pretrained ) )
    def test_error_save_pretrained( self ):
        """simple docstring"""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname )
| 132
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image ):
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
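# e.g. an RGB 500x333 input is resized to 480x320 (the nearest multiples of 32) and
# returned as a (1, 3, 320, 480) tensor with values rescaled from [0, 255] to [-1, 1].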
class LDMSuperResolutionPipeline(DiffusionPipeline):
    '''simple docstring'''
    def __init__( self , vqvae , unet , scheduler , ) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler)
@torch.no_grad()
    def __call__( self , image = None , batch_size = 1 , num_inference_steps = 100 , eta = 0.0 , generator = None , output_type = "pil" , return_dict = True , ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image , PIL.Image.Image):
            batch_size = 1
        elif isinstance(image , torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image , PIL.Image.Image):
            image = preprocess(image)
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype)
        image = image.to(device=self.device , dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input , t)
            # predict the noise residual
            noise_pred = self.unet(latents_input , t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image , -1.0 , 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
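# A minimal usage sketch (the checkpoint id is illustrative, assuming a hosted LDM
# super-resolution model):
#   pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipeline(image=low_res_image, num_inference_steps=100).images[0]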
| 11
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig( PretrainedConfig ):
    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
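        # e.g. with the defaults embed_dim=96 and depths=[2, 2, 6, 2]: 96 * 2**3 = 768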
| 352
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config( checkpoint_url ):
    '''simple docstring'''
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = '''pixelshuffle_aux'''
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = '''pixelshuffledirect'''
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = '''nearest+conv'''
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''''''
    return config
def rename_key( name , config ):
    '''simple docstring'''
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' )
    if "layers" in name:
        name = name.replace('''layers''' , '''encoder.stages''' )
    if "residual_group.blocks" in name:
        name = name.replace('''residual_group.blocks''' , '''layers''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "q_bias" in name:
        name = name.replace('''q_bias''' , '''query.bias''' )
    if "k_bias" in name:
        name = name.replace('''k_bias''' , '''key.bias''' )
    if "v_bias" in name:
        name = name.replace('''v_bias''' , '''value.bias''' )
    if "cpb_mlp" in name:
        name = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "conv_first" in name:
        name = name.replace('''conv_first''' , '''first_convolution''' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('''conv_last''' , '''final_convolution''' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' )
            if "upsample.0" in name:
                name = name.replace('''upsample.0''' , '''upsample.convolution_0''' )
            if "upsample.2" in name:
                name = name.replace('''upsample.2''' , '''upsample.convolution_1''' )
            name = '''upsample.''' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' )
            name = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' )
        else:
            pass
    else:
        name = '''swin2sr.''' + name
    return name
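# Illustrative mapping, assuming original Swin2SR keys of this shape:
#   "layers.0.residual_group.blocks.1.attn.proj.weight"
#   -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"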
def convert_state_dict( orig_state_dict , config ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[
                    F"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    F"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    F"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    F"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    F"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    F"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
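# The original checkpoint fuses query/key/value into a single "qkv" tensor whose first
# dimension is 3 * embed_dim; the slices val[:dim], val[dim : dim * 2] and val[-dim:]
# above recover query, key and value in that order.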
def convert_swin2sr_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    '''simple docstring'''
    config = get_config(checkpoint_url )
    model = Swin2SRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('''Missing keys when converting: {}'''.format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F"""Unexpected key {key} in state_dict""" )
    # verify values
    url = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if '''Jpeg''' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
    print('''Looks ok!''' )
    url_to_name = {
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': (
            '''swin2SR-classical-sr-x2-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': (
            '''swin2SR-classical-sr-x4-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': (
            '''swin2SR-compressed-sr-x4-48'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': (
            '''swin2SR-lightweight-x2-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': (
            '''swin2SR-realworld-sr-x4-64-bsrgan-psnr'''
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(F"""caidas/{model_name}""" )
        processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 250
| 0
|
def perfect( number: int ) -> bool:
    """simple docstring"""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
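# e.g. perfect(28) is True because 1 + 2 + 4 + 7 + 14 == 28, while perfect(27) is False.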
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 253
|
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WER( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        """simple docstring"""
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
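        # Worked example, matching the docstring above: "this is the prediction" vs
        # "this is the reference" contributes 1 substitution over 4 reference words, and
        # "there is an other sample" vs "there is another one" contributes 3 errors over 4,
        # so the corpus-level score is (1 + 3) / (4 + 4) = 0.5.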
| 253
| 1
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key( old_name , num_meta4D_last_stage ):
    '''simple docstring'''
    new_name = old_name
    if "patch_embed" in old_name:
        hidden_layer , layer , param = old_name.split('''.''' )
        if layer == "0":
            new_name = old_name.replace('''0''', '''convolution1''' )
        elif layer == "1":
            new_name = old_name.replace('''1''', '''batchnorm_before''' )
        elif layer == "3":
            new_name = old_name.replace('''3''', '''convolution2''' )
        else:
            new_name = old_name.replace('''4''', '''batchnorm_after''' )
    if "network" in old_name and re.search(r'''\d\.\d''', old_name ):
        two_digit_num = r'''\b\d{2}\b'''
        if bool(re.search(two_digit_num, old_name ) ):
            match = re.search(r'''\d\.\d\d.''', old_name ).group()
        else:
            match = re.search(r'''\d\.\d.''', old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match, '''''' )
            trimmed_name = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
            new_name = '''intermediate_stages.''' + trimmed_name
        else:
            trimmed_name = old_name.replace(match, '''''' )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index )
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace('''norm1''', '''layernorm1''' )
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace('''norm2''', '''layernorm2''' )
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace('''fc1''', '''linear_in''' )
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace('''fc2''', '''linear_out''' )
            new_name = '''last_stage.''' + trimmed_name
    elif "network" in old_name and re.search(r'''.\d.''', old_name ):
        new_name = old_name.replace('''network''', '''intermediate_stages''' )
    if "fc" in new_name:
        new_name = new_name.replace('''fc''', '''convolution''' )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('''norm1''', '''batchnorm_before''' )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('''norm2''', '''batchnorm_after''' )
    if "proj" in new_name:
        new_name = new_name.replace('''proj''', '''projection''' )
    if "dist_head" in new_name:
        new_name = new_name.replace('''dist_head''', '''distillation_classifier''' )
    elif "head" in new_name:
        new_name = new_name.replace('''head''', '''classifier''' )
    elif "patch_embed" in new_name:
        new_name = '''efficientformer.''' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('''norm''', '''layernorm''' )
        new_name = '''efficientformer.''' + new_name
    else:
        new_name = '''efficientformer.encoder.''' + new_name
    return new_name
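# Illustrative mapping, assuming original EfficientFormer keys of this shape:
#   "patch_embed.0.weight" -> "efficientformer.patch_embed.convolution1.weight"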
def convert_torch_checkpoint( checkpoint , num_meta4D_last_stage ):
    '''simple docstring'''
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img( ):
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
def convert_efficientformer_checkpoint( checkpoint_path , efficientformer_config_file , pytorch_dump_path , push_to_hub ):
    '''simple docstring'''
    orig_state_dict = torch.load(checkpoint_path, map_location='''cpu''' )['''model''']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], )
    pixel_values = processor(images=image, return_tensors='''pt''' ).pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings['''bicubic'''] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values, pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path )
    print(F"""Processor successfully saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print('''Pushing model to the hub...''' )
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 356
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
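# With the lazy module installed in sys.modules, the heavy PyTorch/TensorFlow modeling
# files are only imported when one of their symbols is first accessed, so a plain
# `import transformers` stays cheap.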
| 35
| 0
|