"""Fine-tune or train from scratch Transformers models for language modeling (CLM, MLM, or PLM) on a text file."""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset described by `args`."""

    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from"
            " another script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
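
# Example invocation (a sketch: the script filename and file paths are illustrative
# assumptions, while the flags come from the dataclasses defined above):
#
#   python run_language_modeling.py \
#       --model_name_or_path bert-base-uncased \
#       --train_data_file train.txt \
#       --do_train --mlm \
#       --output_dir ./lm-output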
"""Sum the digits of n! (Project Euler problem 20 uses n = 100)."""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the decimal digits of num!."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
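    # Worked check (an added sketch): 10! = 3628800 and 3+6+2+8+8+0+0 == 27.
    assert solution(10) == 27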
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast CodeGen tokenizer, backed by HuggingFace's *tokenizers* library (byte-level BPE)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion: str, truncate_before_pattern: List[str]) -> str:
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
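
# Usage sketch (an illustrative assumption: the public Salesforce/codegen-350M-mono
# checkpoint is reachable; `truncate_before_pattern` is the decode-time hook above):
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer("def hello():\n    print('hi')").input_ids
#   text = tokenizer.decode(ids, truncate_before_pattern=[r"^#", r"\n\n\n"])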
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    """Read a tab-separated entity vocabulary file into a {title: index} dict."""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
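
# Example invocation (a sketch with hypothetical file paths):
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path luke_base/pytorch_model.bin \
#       --metadata_path luke_base/metadata.json \
#       --entity_vocab_path luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base \
#       --model_size base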
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (GluonNLP/MXNet) into a PyTorch BertForMaskedLM."""
    # Original Bort configuration (the released 4-layer / 8-head model)
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
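
# Example invocation (a sketch; the checkpoint path is a hypothetical placeholder):
#
#   python convert_bort_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch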
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class TapasConfig(PretrainedConfig):
    """BERT-style configuration for TAPAS, extended with table fine-tuning hyperparameters."""

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0,
        use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False,
        huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0,
        use_gumbel_for_cells=False, use_gumbel_for_aggregation=False,
        average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None,
        max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True,
        allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True, disable_per_token_loss=False,
        aggregation_labels=None, no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion over every cell: exponential time, O(rows + cols) stack space.

    >>> largest_square_area_in_matrix_top_down(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_memoization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursion with a memo table: O(rows * cols) time and space.

    >>> largest_square_area_in_matrix_top_down_with_memoization(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table: O(rows * cols) time and space.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP keeping only two rows: O(rows * cols) time, O(cols) extra space.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # Snapshot this row before moving up; a plain alias (next_row = current_row)
        # would let the next row's writes leak into the diagonal/bottom reads.
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
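    # Cross-check (an added sketch): all four variants should agree on a small input
    # whose largest all-ones square is 2 x 2.
    sample = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert (
        largest_square_area_in_matrix_top_down(3, 3, sample)
        == largest_square_area_in_matrix_top_down_with_memoization(3, 3, sample)
        == largest_square_area_in_matrix_bottom_up(3, 3, sample)
        == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, sample)
        == 2
    )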
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al. (2022), tailored to variance-expanding models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
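
# Minimal denoising-loop sketch (assumes a `model` callable that predicts noise;
# this mirrors how a pipeline would drive the methods defined above, simplified):
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = model(sample_hat, sigma_hat)
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample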
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Whisper feature extractor: pads/truncates raw audio to fixed-length chunks and computes log-Mel spectrograms."""

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute the log-Mel spectrogram of a waveform using the shared audio utilities."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or several sequence(s) of raw speech into log-Mel input features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dict, dropping the (large, derivable) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
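
# Usage sketch (assumes 16 kHz mono audio as a 1-D numpy array named `audio_array`):
#
#   feature_extractor = WhisperFeatureExtractor()
#   inputs = feature_extractor(audio_array, sampling_rate=16000, return_tensors="np")
#   log_mel = inputs.input_features  # shape (batch, 80 mel bins, 3000 frames)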
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # Backwards-compatibility shim: re-export under the old module path and warn once,
    # at class-creation time, that this import location is deprecated.
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
A_ : List[str] = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class lowerCamelCase (A__ ,unittest.TestCase ):
lowerCamelCase__ : Any = SpeechTaFeatureExtractor
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ = SpeechTaFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : Any ) -> Tuple:
self.assertTrue(np.all(np.mean(__UpperCAmelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float64).sum() - input_pt.numpy().astype(np.float64).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 616
| 1
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the inverted lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond for a positive n."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 165
|
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
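# Worked example: factorial(10) == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
# so solution(10) returns 27.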
| 557
| 0
|
from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
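# Note: these placeholders exist so that `from transformers import <class>`
# still resolves when the optional "speech" dependencies are missing;
# instantiating one calls `requires_backends`, which raises an ImportError
# explaining what to install. Minimal sketch (assuming the speech extras are
# absent):
#
#     extractor = ASTFeatureExtractor()  # raises ImportError with install hint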
| 708
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
for attribute in attributes:
if attribute in mapping:
__UpperCAmelCase =mapping[attribute]
if not hasattr(snake_case__ , snake_case__ ) and len(snake_case__ ) > 0:
__UpperCAmelCase =attribute
elif hasattr(snake_case__ , snake_case__ ):
__UpperCAmelCase =attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__UpperCAmelCase =old_model.weight
logger.info(f"""{attribute} is initialized.""" )
__UpperCAmelCase =True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__UpperCAmelCase =old_model.bias
logger.info(f"""{attribute} is initialized""" )
__UpperCAmelCase =True
break
elif attribute in special_keys and hasattr(snake_case__ , '''in_proj_weight''' ):
__UpperCAmelCase =old_model.in_proj_weight.shape[0] // 3
__UpperCAmelCase =getattr(snake_case__ , snake_case__ )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__UpperCAmelCase =nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__UpperCAmelCase =nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__UpperCAmelCase =nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__UpperCAmelCase =nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__UpperCAmelCase =nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__UpperCAmelCase =nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__UpperCAmelCase =True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__UpperCAmelCase =nn.Parameter(old_model.embed_positions.weight[:512, :] )
__UpperCAmelCase =True
break
if attribute.isdigit():
__UpperCAmelCase =model[int(snake_case__ )]
__UpperCAmelCase =old_model[int(snake_case__ )]
else:
__UpperCAmelCase =getattr(snake_case__ , snake_case__ )
if old_attribute == "":
__UpperCAmelCase =old_model
else:
if not hasattr(snake_case__ , snake_case__ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
__UpperCAmelCase =getattr(snake_case__ , snake_case__ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
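# Illustrative sketch of the q/k/v split performed above: the fused
# `in_proj_weight` stacks query, key and value along dim 0, so a
# (3 * embed_dim, embed_dim) matrix splits into three equal blocks.
#
#     import numpy as np
#     embed_dim = 4
#     in_proj_weight = np.zeros((3 * embed_dim, embed_dim))
#     q = in_proj_weight[:embed_dim, :]
#     k = in_proj_weight[embed_dim : 2 * embed_dim, :]
#     v = in_proj_weight[2 * embed_dim :, :]
#     assert q.shape == k.shape == v.shape == (embed_dim, embed_dim)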
| 142
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    r"""A [`Constraint`] enforcing that an ordered sequence of tokens is included in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        r"""A simple trie over lists of token ids."""
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}

                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`."""
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())

        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)

        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether # of leaves == # of words. Otherwise some word is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
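# Sketch: for nested_token_ids = [[1, 2, 3], [1, 2, 4]] the trie branches at
# depth 2, so `DisjunctiveTrie([[1, 2, 3], [1, 2, 4]]).next_tokens([1, 2])`
# returns [3, 4] and `reached_leaf([1, 2, 3])` is True.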
class DisjunctiveConstraint(Constraint):
    r"""A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    r"""A class for beam scorers to track their progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually mutate self.constraints
        # throughout this process, so this copy starts at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
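# Usage sketch: stepping a PhrasalConstraint token by token. `update` reports
# (stepped, completed, reset) so a beam search can track partial progress.
#
#     constraint = PhrasalConstraint([5, 6, 7])
#     assert constraint.advance() == 5
#     stepped, completed, reset = constraint.update(5)   # (True, False, False)
#     stepped, completed, reset = constraint.update(6)   # (True, False, False)
#     stepped, completed, reset = constraint.update(7)   # (True, True, False)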
| 316
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
A : Dict = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm ViT checkpoint into our ViT structure.
    """
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 349
| 0
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row of the given length can be filled with
    blocks of minimum length 3, each pair separated by at least one empty square.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
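# Worked example from the problem statement (Project Euler 114): a row of
# length 7 admits exactly 17 arrangements, and the recurrence above
# reproduces it:
#
#     solution(7)  # -> 17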
| 701
|
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
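# Worked example: with L = 1 H and C = 1 F,
#     f = 1 / (2 * pi * sqrt(L * C)) = 1 / (2 * pi) ~ 0.159 Hz,
# so resonant_frequency(1, 1) returns ('Resonant frequency', 0.159154...).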
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
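# Batching sketch: `make_batched` normalises the accepted input shapes, e.g.
# a single image becomes [[image]], a list of frames becomes [frames], and a
# list of videos (a list of lists of frames) is returned unchanged, so
# `preprocess` can always iterate `for video in videos: for img in video`.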
| 94
|
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """
    Returns the number of composite integers below max_number with exactly two,
    not necessarily distinct, prime factors.
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
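# Worked example (added for illustration): for max_number = 30 the primes below
# 15 are [2, 3, 5, 7, 11, 13], and the two-pointer sweep counts
#   left=2 -> pairs with 2..13 -> 6 semiprimes (4, 6, 10, 14, 22, 26)
#   left=3 -> pairs with 3..7  -> 3 semiprimes (9, 15, 21)
#   left=5 -> pair with 5      -> 1 semiprime  (25)
# for a total of 10, i.e. solution(30) == 10.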
| 94
| 1
|
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4d_last_stage: int) -> str:
    """Map a key from the original EfficientFormer state dict to its Transformers name."""
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4d_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4d_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint: dict, num_meta4d_last_stage: int) -> dict:
    """Rename every key of the original checkpoint in place."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4d_last_stage)] = val
    return checkpoint
def prepare_img():
    """Load the standard COCO verification image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: str, efficientformer_config_file: str, pytorch_dump_path: str, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4d_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4d_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
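# Example invocation (file names are placeholders, not shipped with this script;
# the flags below are the ones defined by the argument parser that follows):
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub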
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 439
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
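# Usage sketch (added for illustration): the deprecated alias still works but
# emits a FutureWarning pointing at PoolFormerImageProcessor.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         PoolFormerFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)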
| 439
| 1
|
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """
    >>> greatest_common_divisor(4, 8)
    4
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
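# Worked example (added for illustration): with encrypt key [[2, 5], [1, 6]]
# (determinant 7, coprime to 36, so the key is valid), the block "AB" encodes
# as K @ [0, 1]^T mod 36 = [5, 6]^T, i.e. the ciphertext "FG":
#
#     cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     assert cipher.encrypt("AB") == "FG"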
| 601
|
'''simple docstring'''
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 1
|
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
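# Running these (sketch; the test-file path is an assumption):
#   pytest tests/pipelines/ddim/test_ddim.py             # fast tests only
#   RUN_SLOW=1 pytest tests/pipelines/ddim/test_ddim.py  # also run the @slow GPU tests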
| 657
|
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """
    >>> generate_pascal_triangle(3)
    [[1], [1, 1], [1, 2, 1]]
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """
    >>> generate_pascal_triangle_optimized(3)
    [[1], [1, 1], [1, 2, 1]]
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both triangle generators for a range of sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
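# Example output (added for illustration): both generators agree, e.g.
#     generate_pascal_triangle(4)            -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
#     generate_pascal_triangle_optimized(4)  -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]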
| 657
| 1
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config: bool = True) -> list:
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
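# Note (added; assumes an `integration` marker is registered in the project's
# pytest configuration): the two @pytest.mark.integration tests above hit the
# Hugging Face GCS mirror and are selected explicitly, e.g. `pytest -m integration`.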
| 686
|
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
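# Sanity check (added for illustration; commented out so the module has no
# import-time side effects -- the pivot is random but the result is always sorted):
#
#     data = [3, 1, 4, 1, 5, 9, 2, 6]
#     quick_sort_random(data, 0, len(data))
#     assert data == [1, 1, 2, 3, 4, 5, 6, 9]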
| 686
| 1
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case = False
class __A ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = pipe.dual_guided(
prompt="first prompt" , image=_snake_case , text_to_image_strength=0.75 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_snake_case )
_lowerCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(_snake_case , torch_dtype=torch.floataa )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase : Any = generator.manual_seed(0 )
_lowerCAmelCase : Any = pipe.dual_guided(
prompt="first prompt" , image=_snake_case , text_to_image_strength=0.75 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Dict = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase : Optional[int] = "cyberpunk 2077"
_lowerCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_lowerCAmelCase : Dict = torch.manual_seed(0 )
_lowerCAmelCase : int = pipe.dual_guided(
prompt=_snake_case , image=_snake_case , text_to_image_strength=0.75 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_lowerCAmelCase : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Tuple = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_lowerCAmelCase : Union[str, Any] = "A painting of a squirrel eating a burger "
_lowerCAmelCase : Tuple = torch.manual_seed(0 )
_lowerCAmelCase : Dict = pipe.text_to_image(
prompt=_snake_case , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_lowerCAmelCase : Any = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_lowerCAmelCase : Optional[int] = pipe.image_variation(_snake_case , generator=_snake_case , output_type="numpy" ).images
_lowerCAmelCase : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 587
|
def text_justification(word: str, max_width: int) -> list:
    """
    Wrap and fully justify the words of `word` so that every line is exactly
    `max_width` characters wide.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
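# Worked example (added for illustration): justifying ['example', 'of', 'text']
# at word-width 13 into max_width 16 leaves 3 spaces to distribute over 2 gaps;
# round-robin assigns [2, 1] spaces, producing 'example  of text'.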
if __name__ == "__main__":
from doctest import testmod
testmod()
| 587
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
| 78
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
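# Quick arithmetic check (added for illustration): with the default
# upsampling_ratios [8, 5, 4, 2] the hop length is 8 * 5 * 4 * 2 = 320 samples,
# so a 24 kHz model produces ceil(24000 / 320) = 75 frames per second, and the
# 24.0 kbps top bandwidth yields int(1000 * 24.0 // (75 * 10)) = 32 quantizers.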
| 589
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase_ ( lowercase ):
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=64 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , lowerCamelCase_=2 , lowerCamelCase_=2 , lowerCamelCase_=2 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=1 , ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = q_groups
_UpperCamelCase = k_groups
_UpperCamelCase = v_groups
_UpperCamelCase = post_attention_groups
_UpperCamelCase = intermediate_groups
_UpperCamelCase = output_groups
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = SqueezeBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
"""simple docstring"""
_UpperCamelCase = SqueezeBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = SqueezeBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_UpperCamelCase = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = SqueezeBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = SqueezeBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
"""simple docstring"""
_UpperCamelCase = self.num_choices
_UpperCamelCase = SqueezeBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
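# Hedged usage sketch of the checkpoint exercised above (our addition, network access assumed;
# the three output logits follow the MNLI head of `squeezebert/squeezebert-mnli`):
#
#     from transformers import AutoTokenizer, SqueezeBertForSequenceClassification
#
#     tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
#     model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
#     inputs = tokenizer("A man is playing a guitar.", "A person makes music.", return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, 3), matching the integration test above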
"""Check whether a number is a perfect cube."""
def perfect_cube(n: int) -> bool:
    # Note: the float cube root is rounded before cubing, so exact float equality is safe
    # for moderately sized non-negative integers.
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
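# A hedged, integer-exact variant (our addition, not in the original file): it corrects the
# rounded float estimate in both directions, so very large inputs are not misjudged by
# floating-point error.
def perfect_cube_exact(n: int) -> bool:
    n = abs(n)
    root = round(n ** (1 / 3))
    while root**3 > n:
        root -= 1
    while root**3 < n:
        root += 1
    return root**3 == n


if __name__ == "__main__":
    print(perfect_cube_exact(27))  # True
    print(perfect_cube_exact(10**18))  # True: (10**6) ** 3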
"""Fast tokenization classes for SqueezeBERT."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
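# Hedged usage sketch (our addition, not part of the module; loading from the Hub needs
# network access, and the checkpoint name is one of the maps above):
#
#     from transformers import SqueezeBertTokenizerFast
#
#     tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#     enc = tokenizer("Hello world", "Second segment")
#     # enc["input_ids"] is [CLS] ... [SEP] ... [SEP]; enc["token_type_ids"] marks the second
#     # segment with 1s, matching create_token_type_ids_from_sequences above.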
"""Evaluate a postfix (Reverse Polish) expression, printing a step-by-step trace table."""
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"

    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
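# Hedged sketch (our addition, requires `bs4`): the extractor returns, for each HTML string,
# its text nodes and their XPaths, exactly as asserted in test_call above:
#
#     fe = MarkupLMFeatureExtractor()
#     out = fe("<html><body><h1>My Title</h1></body></html>")
#     # out.nodes  -> [["My Title"]]
#     # out.xpaths -> [["/html/body/h1"]]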
"""Processor class for LayoutXLM."""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
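# Hedged usage sketch (our addition, not part of the module): with `apply_ocr=True` on the
# image processor, words and boxes come from OCR; the checkpoint name is illustrative.
#
#     from transformers import LayoutXLMProcessor
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(images=image, return_tensors="pt")  # OCR supplies words + boxes
#     # encoding holds input_ids, bbox, attention_mask and image, per model_input_names above.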
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
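# Hedged sketch (our addition): the same simulation modes can wrap any network-dependent
# code path to check that it fails fast instead of hanging:
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         ...  # e.g. a datasets download call expected to raise ConnectionError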
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")

    args = parser.parse_args()
    main(args)
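# Toy illustration (our addition) of `add_sub_symbol`: characters that continue an LTP
# whole word are re-marked with BERT's "##" continuation prefix:
#
#     add_sub_symbol(["身", "高", "是"], {"身高"})  # -> ["身", "##高", "是"]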
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
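# Example invocation (our addition; paths and script name are hypothetical):
#
#     python convert_rembert_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/rembert/ckpt \
#         --rembert_config_file /path/to/rembert_config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin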
"""Testing suite for the PyTorch DPT model."""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
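# Hedged usage sketch mirroring the integration test above (our addition; network access assumed):
#
#     from transformers import DPTImageProcessor, DPTForDepthEstimation
#
#     image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
#     model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
#     inputs = image_processor(images=image, return_tensors="pt")
#     depth = model(**inputs).predicted_depth  # (1, 384, 384) relative depth map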
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
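# With the lazy structure above, a plain import of the package stays cheap: a submodule such
# as modeling_xlm_roberta is only loaded the first time one of its names is accessed. A hedged
# illustration (our addition):
#
#     from transformers import XLMRobertaConfig  # resolved through the _LazyModule machinery
#     config = XLMRobertaConfig()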
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__lowercase : int ="""\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__lowercase : List[Any] ="""\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__lowercase : str =R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH benchmark, with canonicalization of LaTeX answers."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing each prediction/reference pair."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal part of `number`. If digit_amount > 0, round the decimal
    part to that many digits; otherwise return the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
import os
def a ( a = "matrix.txt" ) ->int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(a ) , a ) ) as in_file:
SCREAMING_SNAKE_CASE = in_file.read()
SCREAMING_SNAKE_CASE = [[int(a ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE = len(grid[0] )
SCREAMING_SNAKE_CASE = [[0 for i in range(a )] for j in range(a )]
SCREAMING_SNAKE_CASE = grid[0][0]
for i in range(1 , a ):
SCREAMING_SNAKE_CASE = grid[0][i] + dp[0][i - 1]
for i in range(1 , a ):
SCREAMING_SNAKE_CASE = grid[i][0] + dp[i - 1][0]
for i in range(1 , a ):
for j in range(1 , a ):
SCREAMING_SNAKE_CASE = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
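# Quick sanity check of the DP recurrence (our addition): for [[1, 2], [3, 4]] the cheapest
# right/down path is 1 -> 2 -> 4 = 7, i.e. dp[1][1] = 4 + min(dp[0][1]=3, dp[1][0]=4).
if __name__ == "__main__":
    grid = [[1, 2], [3, 4]]
    dp = [[1, 3], [4, 0]]
    dp[1][1] = grid[1][1] + min(dp[0][1], dp[1][0])
    assert dp[1][1] == 7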
"""ViViT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
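# Hedged usage sketch (our addition, not part of the module): instantiating a randomly
# initialised ViViT from this config; `VivitModel` is assumed available in the same library.
#
#     from transformers import VivitConfig, VivitModel
#
#     config = VivitConfig(num_frames=16)  # 16-frame clips instead of the default 32
#     model = VivitModel(config)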
"""Utilities for working with package versions."""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
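# Hedged usage sketch (our addition): a requirement string combines a package name with one or
# more version specifiers; "python" is special-cased against sys.version_info.
#
#     require_version("torch>=1.9.0", hint="pip install torch -U")  # raises ImportError if too old
#     require_version_core("numpy")  # presence-only check, with the canonical transformers hint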
"""Fast tokenization class for mBART."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a : Any = None
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
a : int = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        '''simple docstring'''
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        '''simple docstring'''
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs):
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def set_tgt_lang_special_tokens(self, lang) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory." )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
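
if __name__ == "__main__":
    # Usage sketch (downloads a checkpoint; the identifier is one of those mapped above):
    tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    enc = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
    # mBART appends [</s>, lang_code] as suffix tokens rather than prefixing them.
    print(tok.convert_ids_to_tokens(enc["input_ids"][0][-2:].tolist()))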
| 679
|
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """simple docstring"""

    def __init__(self, order: int) -> None:
        '''simple docstring'''
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        '''simple docstring'''
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        '''simple docstring'''
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
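
if __name__ == "__main__":
    # Usage sketch (coefficient values are illustrative, not from the original module):
    # configure a 2nd-order filter and print the start of its impulse response.
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -1.8, 0.81], [0.1, 0.2, 0.1])
    print([round(filt.process(x), 4) for x in (1.0, 0.0, 0.0, 0.0)])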
| 679
| 1
|
from math import pi, sqrt
def gamma(num: float) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    """simple docstring"""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
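
# Worked example (a sketch, not part of the original tests): the half-integer
# recursion unrolls as gamma(3.5) == 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.3234,
# which agrees with math.gamma(3.5).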
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 700
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase )
def load_adapter(full_name, value, adapter, unused_weights):
    """simple docstring"""
    name = full_name.split("adaptor." )[-1]
    items = name.split("." )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
                adapter.proj_layer_norm.bias.data = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
                adapter.proj_layer_norm.weight.data = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
                adapter.proj.bias.data = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
                adapter.proj.weight.data = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
    elif isinstance(layer_id, int ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
            adapter.layers[layer_id].conv.bias.data = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
            adapter.layers[layer_id].conv.weight.data = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase )
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
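
# Usage sketch (hypothetical sizes): the returned head shares its weight tensor
# with the embedding, which is how the converted decoder ties input and output
# embeddings.
#   emb = nn.Embedding(250_054, 1_024)
#   lm_head = make_linear_from_emb(emb)
#   assert lm_head.weight.data_ptr() == emb.weight.data_ptr()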
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ):
    """simple docstring"""
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/" )[:-1] ),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250_004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250004, type=int, help='`decoder_start_token_id` of model config')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
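
    # Example invocation (paths are illustrative; the flags match the argparse
    # definitions above):
    #   python convert_wav2vec2_mbart.py \
    #       --checkpoint_path /path/to/fairseq_checkpoint.pt \
    #       --pytorch_dump_folder_path ./converted_model \
    #       --dict_path /path/to/dict \
    #       --config_yaml_path /path/to/config.yaml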
| 458
| 0
|
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
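
# Usage sketch with the graphs above (expected value follows from the edges:
# E -> G costs 2, G -> F costs 1):
#   >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
#   3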
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description )
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description )
    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith(".json" ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f"accelerate configuration saved at {config_file}" )
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
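
# Usage sketch: the same flow can be driven programmatically (the path is illustrative):
#   parser = config_command_parser()
#   config_command(parser.parse_args(["--config_file", "my_config.yaml"]))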
| 91
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
    '''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
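
# Note (sketch): with the `_LazyModule` pattern above, heavy submodules are only
# imported on first attribute access, e.g.
#   from transformers.models.luke import LukeModel  # triggers the torch-backed import lazily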
| 189
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
    '''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 189
| 1
|
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
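
# Worked example (sketch): negative inputs saturate smoothly toward -alpha.
#   >>> exponential_linear_unit(np.array([-2.0, 0.0, 2.0]), alpha=1.0)
#   array([-0.86466472,  0.        ,  2.        ])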
| 101
|
from manim import *
class a__ ( __snake_case ):
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a = Rectangle(height=0.5 , width=0.5 )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a = Rectangle(height=0.25 , width=0.25 )
__a = [mem.copy() for i in range(6 )]
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('CPU' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
__a = [mem.copy() for i in range(4 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('GPU' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase )
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Model' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase )
__a = []
__a = []
for i, rect in enumerate(UpperCAmelCase ):
__a = fill.copy().set_fill(UpperCAmelCase , opacity=0.8 )
target.move_to(UpperCAmelCase )
model_arr.append(UpperCAmelCase )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase )
self.add(*UpperCAmelCase , *UpperCAmelCase )
__a = [meta_mem.copy() for i in range(6 )]
__a = [meta_mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Disk' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
__a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
__a = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase )
__a = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase ) )
__a = Square(0.3 )
input.set_fill(UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase , buff=0.5 )
self.play(Write(UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase ) )
self.play(FadeOut(UpperCAmelCase ) )
__a = Arrow(start=UpperCAmelCase , end=UpperCAmelCase , color=UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__a = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) )
__a = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase ) , Circumscribe(model_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__a = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__a = AnimationGroup(
FadeOut(UpperCAmelCase , run_time=0.5 ) , MoveToTarget(UpperCAmelCase , run_time=0.5 ) , FadeIn(UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__a = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__a = a_c
__a = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase ) , FadeOut(UpperCAmelCase , run_time=0.5 ) , )
__a = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) , MoveToTarget(UpperCAmelCase ) )
self.wait()
| 559
| 0
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
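
# Worked example (sketch): 1/2 + 1/3 + 1/6 = 1, so
#   add_three(1, 2, 1, 3, 1, 6) == (1, 1)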
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 712
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"""{module}.py""")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""")
    with open(os.path.join(DIFFUSERS_PATH, f"""{module}.py"""), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
_re_replace_pattern = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
_re_fill_pattern = re.compile(r"""<FILL\s+[^>]*>""")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
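
# Usage sketch: indented snippets are wrapped in a dummy class so black can parse
# them, then unwrapped again:
#   blackify("    def f(x):\n        return x\n")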
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"""^{indent}# End copy""", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 504
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop('tpu_name', self.tpu_name)
        self.device_idx = kwargs.pop('device_idx', self.device_idx)
        self.eager_mode = kwargs.pop('eager_mode', self.eager_mode)
        self.use_xla = kwargs.pop('use_xla', self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"}, )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."}, )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False, metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        }, )
    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['tf'] )
snake_case__ = None
if self.tpu:
try:
if self.tpu_name:
snake_case__ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
snake_case__ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
snake_case__ = None
return tpu
    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
snake_case__ = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
snake_case__ = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
snake_case__ = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ['tf'] )
        return self._setup_tpu is not None
    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ['tf'] )
        return self._setup_strategy
    @property
    def gpu_list(self):
        requires_backends(self, ['tf'] )
        return tf.config.list_physical_devices('GPU' )
    @property
    def n_gpu(self) -> int:
        requires_backends(self, ['tf'] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
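
# Usage sketch (field names such as `models`, `batch_sizes` and `sequence_lengths`
# come from the parent `BenchmarkArguments`; values are illustrative):
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   args.strategy  # resolves the TPU/GPU/CPU tf.distribute strategy defined above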
| 328
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """simple docstring"""
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """simple docstring"""
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=1, act_fn="silu", latent_channels=4, norm_num_groups=32, sample_size=32, scaling_factor=0.18215, ):
        """simple docstring"""
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        """simple docstring"""
        if isinstance(module, (Encoder, Decoder) ):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """simple docstring"""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        """simple docstring"""
        self.enable_tiling(False )

    def enable_slicing(self):
        """simple docstring"""
        self.use_slicing = True

    def disable_slicing(self):
        """simple docstring"""
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """simple docstring"""
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor" ):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""", child, processors )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors )
        return processors
    def set_attn_processor(self, processor):
        """simple docstring"""
        count = len(self.attn_processors.keys() )
        if isinstance(processor, dict ) and len(processor ) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor )} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor" ):
                if not isinstance(processor, dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""", child, processor )

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor )
    def set_default_attn_processor(self):
        """simple docstring"""
        self.set_attn_processor(AttnProcessor() )
    @apply_forward_hook
    def encode(self, x, return_dict: bool = True) -> AutoencoderKLOutput:
        """simple docstring"""
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )

    def _decode(self, z, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )

    @apply_forward_hook
    def decode(self, z, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[Any] = min(a.shape[2] , b.shape[2] , lowerCamelCase )
for y in range(lowerCamelCase ):
lowercase__ : Union[str, Any] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowercase__ : str = min(a.shape[3] , b.shape[3] , lowerCamelCase )
for x in range(lowerCamelCase ):
lowercase__ : List[Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __a ( self , lowerCamelCase , lowerCamelCase = True ) -> AutoencoderKLOutput:
"""simple docstring"""
lowercase__ : List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
lowercase__ : Optional[int] = int(self.tile_latent_min_size * self.tile_overlap_factor )
lowercase__ : Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
lowercase__ : Optional[int] = []
for i in range(0 , x.shape[2] , lowerCamelCase ):
lowercase__ : int = []
for j in range(0 , x.shape[3] , lowerCamelCase ):
lowercase__ : Optional[Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
lowercase__ : Any = self.encoder(lowerCamelCase )
lowercase__ : Optional[int] = self.quant_conv(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
lowercase__ : List[str] = []
for i, row in enumerate(lowerCamelCase ):
lowercase__ : Optional[int] = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
lowercase__ : Dict = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
lowercase__ : Any = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
lowercase__ : Dict = torch.cat(lowerCamelCase , dim=2 )
lowercase__ : List[str] = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def __a ( self , lowerCamelCase , lowerCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
lowercase__ : Union[str, Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
lowercase__ : List[str] = int(self.tile_sample_min_size * self.tile_overlap_factor )
lowercase__ : Union[str, Any] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
lowercase__ : List[Any] = []
for i in range(0 , z.shape[2] , lowerCamelCase ):
lowercase__ : Dict = []
for j in range(0 , z.shape[3] , lowerCamelCase ):
lowercase__ : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
lowercase__ : int = self.post_quant_conv(lowerCamelCase )
lowercase__ : Optional[Any] = self.decoder(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
lowercase__ : List[str] = []
for i, row in enumerate(lowerCamelCase ):
lowercase__ : str = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
lowercase__ : Tuple = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
lowercase__ : Optional[int] = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
lowercase__ : str = torch.cat(lowerCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
def __a ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
lowercase__ : Optional[int] = sample
lowercase__ : List[Any] = self.encode(lowerCamelCase ).latent_dist
if sample_posterior:
lowercase__ : Union[str, Any] = posterior.sample(generator=lowerCamelCase )
else:
lowercase__ : int = posterior.mode()
lowercase__ : Tuple = self.decode(lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
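# A minimal usage sketch for the tiled autoencoder above (it mirrors diffusers'
# AutoencoderKL); the checkpoint id below is an assumption for illustration.
# With tiling enabled, inputs larger than tile_sample_min_size are encoded and
# decoded in overlapping tiles whose seams are blended by blend_v / blend_h.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")  # assumed checkpoint
vae.enable_tiling()
with torch.no_grad():
    image = torch.randn(1, 3, 1024, 1024)  # large input that triggers the tiled path
    latents = vae.encode(image).latent_dist.sample()
    reconstruction = vae.decode(latents).sample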
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , __a : Union[str, Any] , __a : str ):
super().__init__()
# make sure scheduler can always be converted to DDIM
_a = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__a , scheduler=__a )
@torch.no_grad()
def __call__( self : int , __a : int = 1 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : float = 0.0 , __a : int = 50 , __a : Optional[bool] = None , __a : Optional[str] = "pil" , __a : bool = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __a ):
_a = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_a = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__a , __a ) and len(__a ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(__a )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
_a = randn_tensor(__a , generator=__a , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__a )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_a = self.unet(__a , __a ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_a = self.scheduler.step(
__a , __a , __a , eta=__a , use_clipped_model_output=__a , generator=__a ).prev_sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
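# A hedged usage sketch for the DDIM pipeline above; the checkpoint id is an
# assumption. eta=0.0 gives deterministic DDIM sampling, while eta=1.0 recovers
# DDPM-like stochastic sampling.
import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")  # assumed checkpoint
generator = torch.Generator().manual_seed(0)
image = pipe(batch_size=1, generator=generator, eta=0.0, num_inference_steps=50).images[0]
image.save("ddim_sample.png")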
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['pixel_values']
def __init__( self : Optional[int] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Union[int, float] = 1 / 2_55 , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , **__a : Any , ):
super().__init__(**__a )
_a = size if size is not None else {"shortest_edge": 2_24}
_a = get_size_dict(__a , default_to_square=__a )
_a = crop_size if crop_size is not None else {"height": 2_56, "width": 2_56}
_a = get_size_dict(__a , param_name="crop_size" )
_a = do_resize
_a = size
_a = resample
_a = do_rescale
_a = rescale_factor
_a = do_center_crop
_a = crop_size
_a = do_flip_channel_order
def UpperCamelCase__ ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PIL.Image.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Dict , ):
_a = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : str , ):
_a = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def UpperCamelCase__ ( self : str , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : str , ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def UpperCamelCase__ ( self : List[Any] , __a : np.ndarray , __a : Optional[Union[str, ChannelDimension]] = None ):
return flip_channel_order(__a , data_format=__a )
def UpperCamelCase__ ( self : Union[str, Any] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : int , ):
_a = do_resize if do_resize is not None else self.do_resize
_a = resample if resample is not None else self.resample
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_a = size if size is not None else self.size
_a = get_size_dict(__a , default_to_square=__a )
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__a , param_name="crop_size" )
_a = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
_a = [to_numpy_array(__a ) for image in images]
if do_resize:
_a = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
_a = [self.rescale(image=__a , scale=__a ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_a = [self.flip_channel_order(image=__a ) for image in images]
_a = [to_channel_dimension_format(__a , __a ) for image in images]
_a = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : List[Tuple] = None ):
_a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__a ) != len(__a ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(__a ):
_a = target_sizes.numpy()
_a = []
for idx in range(len(__a ) ):
_a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__a )
_a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__a )
else:
_a = logits.argmax(dim=1 )
_a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
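# A hedged end-to-end sketch for the processor above; it matches MobileViT's
# image processor, and the model class and checkpoint id here are assumptions.
# Note that target_sizes expects (height, width), hence the reversed PIL size.
import torch
from PIL import Image
from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
image = Image.open("scene.jpg")  # hypothetical input file
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# resize logits back to the original image size and take the per-pixel argmax
segmentation_maps = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)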
'''simple docstring'''
from PIL import Image
def lowercase__ ( __UpperCamelCase : Image ):
'''simple docstring'''
__lowercase , __lowercase = image.size
__lowercase = 0
__lowercase = image.load()
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
__lowercase = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(__UpperCamelCase ):
for i in range(__UpperCamelCase ):
__lowercase = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
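# The same mean-threshold binarisation, sketched with NumPy instead of the
# per-pixel loops above; the integer mean matches the floor division used there.
import numpy as np

def mean_threshold_np(image: Image.Image) -> Image.Image:
    arr = np.asarray(image, dtype=np.int64)
    mean = int(arr.sum()) // arr.size
    return Image.fromarray(np.where(arr > mean, 255, 0).astype(np.uint8))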
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def lowercase__ ( *__UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
__lowercase = list(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
__lowercase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def lowercase__ ( __UpperCamelCase : Exception ):
'''simple docstring'''
__lowercase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def lowercase__ ( __UpperCamelCase : callable = None , __UpperCamelCase : int = 128 ):
'''simple docstring'''
if function is None:
return functools.partial(__UpperCamelCase , starting_batch_size=__UpperCamelCase )
__lowercase = starting_batch_size
def decorator(*__UpperCamelCase : List[Any] , **__UpperCamelCase : Optional[Any] ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
__lowercase = list(inspect.signature(__UpperCamelCase ).parameters.keys() )
# Guard against user error
if len(__UpperCamelCase ) < (len(__UpperCamelCase ) + 1):
__lowercase = """, """.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
except Exception as e:
if should_reduce_batch_size(__UpperCamelCase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
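# The decorator defined above corresponds to accelerate's
# find_executable_batch_size (the local names are mangled). A hedged usage
# sketch: the wrapped function must take the batch size as its first argument,
# and the decorator halves it after every OOM-like failure until a call succeeds.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_run(batch_size):
    # hypothetical training step; a real loop would size its dataloader from batch_size
    print(f"attempting batch_size={batch_size}")

training_run()  # retries with 128, 64, 32, ... on CUDA/XPU/NPU out-of-memory errors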
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Union[str, Any] = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
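# With the lazy structure above, consumers import names normally and the heavy
# submodules are only loaded on first attribute access, e.g.:
from transformers import LongformerConfig, LongformerTokenizer  # resolved lazily

config = LongformerConfig(attention_window=512)  # triggers the configuration import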
from scipy.stats import spearmanr
import datasets
A__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
A__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
A__ = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def __lowerCamelCase ( self :Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) ,reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] ,)
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Union[str, Any] ,__lowercase :Dict ,__lowercase :Dict=False ):
snake_case__ : str = spearmanr(__lowercase ,__lowercase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_a : Dict = logging.get_logger(__name__)
_a : Optional[Any] = {"vocab_file": "spiece.model"}
_a : Tuple = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
_a : Union[str, Any] = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
_a : Any = "▁"
class __A (__magic_name__ ):
snake_case :Union[str, Any] = VOCAB_FILES_NAMES
snake_case :Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Any = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="</s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_=1_00 , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_=True , **UpperCamelCase_ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__UpperCAmelCase : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__UpperCAmelCase : List[str] = len(set(filter(lambda UpperCamelCase_ : bool("extra_id" in str(UpperCamelCase_ ) ) , UpperCamelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
f"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
__UpperCAmelCase : Dict = legacy
__UpperCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : Tuple = vocab_file
__UpperCAmelCase : Tuple = extra_ids
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
@staticmethod
def _snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
__UpperCAmelCase : Dict = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , UpperCamelCase_ , )
return max_model_length
@property
def _snake_case ( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def _snake_case ( self ):
__UpperCAmelCase : Tuple = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(UpperCamelCase_ )) + [1]
return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self ):
return list(
set(filter(lambda UpperCamelCase_ : bool(re.search(r"<extra_id_\d+>" , UpperCamelCase_ ) ) is not None , self.additional_special_tokens ) ) )
def _snake_case ( self ):
return [self._convert_token_to_id(UpperCamelCase_ ) for token in self.get_sentinel_tokens()]
def _snake_case ( self , UpperCamelCase_ ):
if len(UpperCamelCase_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_ )
if token_ids_a is None:
return token_ids_a
else:
__UpperCAmelCase : Union[str, Any] = self._add_eos_if_not_present(UpperCamelCase_ )
return token_ids_a + token_ids_a
def __getstate__( self ):
__UpperCAmelCase : Optional[int] = self.__dict__.copy()
__UpperCAmelCase : Any = None
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , UpperCamelCase_ , **UpperCamelCase_ ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
__UpperCAmelCase : Optional[Any] = SPIECE_UNDERLINE + text.replace(UpperCamelCase_ , " " )
return super().tokenize(UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , **UpperCamelCase_ ):
if not self.legacy:
__UpperCAmelCase : Optional[Any] = text.startswith(UpperCamelCase_ )
if is_first:
__UpperCAmelCase : str = text[1:]
__UpperCAmelCase : Tuple = self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(UpperCamelCase_ ):
__UpperCAmelCase : Dict = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def _snake_case ( self , UpperCamelCase_ ):
if token.startswith("<extra_id_" ):
__UpperCAmelCase : int = re.match(r"<extra_id_(\d+)>" , UpperCamelCase_ )
__UpperCAmelCase : Any = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if index < self.sp_model.get_piece_size():
__UpperCAmelCase : str = self.sp_model.IdToPiece(UpperCamelCase_ )
else:
__UpperCAmelCase : Union[str, Any] = f"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = []
__UpperCAmelCase : Dict = ""
__UpperCAmelCase : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
__UpperCAmelCase : str = True
__UpperCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = False
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : int = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
__UpperCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
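# A hedged usage sketch for the SentencePiece tokenizer above (T5's tokenizer);
# extra_ids=100 appends sentinel tokens <extra_id_0> ... <extra_id_99> at the
# top of the vocabulary, used as span-corruption placeholders.
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")
ids = tok("The <extra_id_0> walks in <extra_id_1> park").input_ids
print(tok.convert_ids_to_tokens(ids))  # sentinels resolve via vocab_size - num - 1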
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ ) -> bool:
"""simple docstring"""
__UpperCAmelCase : int = len(lowerCamelCase__ )
# We need to create solution object to save path.
__UpperCAmelCase : List[str] = [[0 for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )]
__UpperCAmelCase : Optional[Any] = run_maze(lowerCamelCase__ , 0 , 0 , lowerCamelCase__ )
if solved:
print("\n".join(str(lowerCamelCase__ ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
__UpperCAmelCase : str = len(lowerCamelCase__ )
# Final check point.
if i == j == (size - 1):
__UpperCAmelCase : str = 1
return True
__UpperCAmelCase : Any = (not i < 0) and (not j < 0) # Check lower bounds
__UpperCAmelCase : Union[str, Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
__UpperCAmelCase : Tuple = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
__UpperCAmelCase : Optional[int] = 1
# check for directions
if (
run_maze(lowerCamelCase__ , i + 1 , lowerCamelCase__ , lowerCamelCase__ )
or run_maze(lowerCamelCase__ , lowerCamelCase__ , j + 1 , lowerCamelCase__ )
or run_maze(lowerCamelCase__ , i - 1 , lowerCamelCase__ , lowerCamelCase__ )
or run_maze(lowerCamelCase__ , lowerCamelCase__ , j - 1 , lowerCamelCase__ )
):
return True
__UpperCAmelCase : Dict = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
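# A hedged call sketch for the solver above (TheAlgorithms' "rat in a maze";
# the mangled top-level entry point is solve_maze in the original): 0 marks a
# free cell, 1 a wall, and the path runs from (0, 0) to (n - 1, n - 1).
maze = [
    [0, 1, 0, 0],
    [0, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 0, 0],
]
# solve_maze(maze)  # prints a 0/1 matrix tracing the path, or "No solution exists!"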
import numpy as np
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self ):
SCREAMING_SNAKE_CASE_ : List[str] =(0, 0)
SCREAMING_SNAKE_CASE_ : Optional[int] =None
SCREAMING_SNAKE_CASE_ : Optional[int] =0
SCREAMING_SNAKE_CASE_ : List[str] =0
SCREAMING_SNAKE_CASE_ : Tuple =0
def __eq__( self , __UpperCAmelCase ):
return self.position == cell.position
def __lowerCamelCase ( self ):
print(self.position )
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase=(5, 5) ):
SCREAMING_SNAKE_CASE_ : List[Any] =np.zeros(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ : List[str] =world_size[0]
SCREAMING_SNAKE_CASE_ : List[str] =world_size[1]
def __lowerCamelCase ( self ):
print(self.w )
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =[
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
SCREAMING_SNAKE_CASE_ : Optional[Any] =cell.position[0]
SCREAMING_SNAKE_CASE_ : Tuple =cell.position[1]
SCREAMING_SNAKE_CASE_ : List[str] =[]
for n in neughbour_cord:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =current_x + n[0]
SCREAMING_SNAKE_CASE_ : Optional[int] =current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
SCREAMING_SNAKE_CASE_ : List[Any] =Cell()
SCREAMING_SNAKE_CASE_ : int =(x, y)
SCREAMING_SNAKE_CASE_ : Optional[Any] =cell
neighbours.append(__UpperCamelCase )
return neighbours
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] =[]
SCREAMING_SNAKE_CASE_ : List[Any] =[]
_open.append(UpperCAmelCase__ )
while _open:
SCREAMING_SNAKE_CASE_ : Dict =np.argmin([n.f for n in _open] )
SCREAMING_SNAKE_CASE_ : Optional[Any] =_open[min_f]
_closed.append(_open.pop(UpperCAmelCase__ ) )
if current == goal:
break
for n in world.get_neigbours(UpperCAmelCase__ ):
for c in _closed:
if c == n:
continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] =[]
while current.parent is not None:
path.append(current.position )
SCREAMING_SNAKE_CASE_ : Dict =current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = Gridworld()
# Start position and goal
__SCREAMING_SNAKE_CASE = Cell()
__SCREAMING_SNAKE_CASE = (0, 0)
__SCREAMING_SNAKE_CASE = Cell()
__SCREAMING_SNAKE_CASE = (4, 4)
print(f"""path from {start.position} to {goal.position}""")
__SCREAMING_SNAKE_CASE = astar(world, start, goal)
# Just for visual reasons.
for i in s:
__SCREAMING_SNAKE_CASE = 1
print(world.w)
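# The heuristic set above is the squared Euclidean distance to the goal; a
# common alternative on 4-connected grids (where it is admissible) is the
# Manhattan distance, sketched here:
def manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])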
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_lowercase = Features({'text': Value('string' )} )
_lowercase = Features({'labels': ClassLabel} )
_lowercase = "text"
_lowercase = "labels"
def __lowerCamelCase ( self , __UpperCAmelCase ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , __UpperCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =copy.deepcopy(self )
SCREAMING_SNAKE_CASE_ : List[str] =self.label_schema.copy()
SCREAMING_SNAKE_CASE_ : Tuple =features[self.label_column]
SCREAMING_SNAKE_CASE_ : str =label_schema
return task_template
@property
def __lowerCamelCase ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
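# A hedged sketch of how datasets consumes this template: prepare_for_task
# casts a dataset's own columns onto the template's "text"/"labels" schema via
# the align-with-features method above. The dataset id is an assumption, and
# prepare_for_task was deprecated in later datasets releases.
from datasets import load_dataset

ds = load_dataset("imdb", split="train")
ds = ds.prepare_for_task("text-classification")
print(ds.features)  # {"text": Value("string"), "labels": ClassLabel(...)}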
from __future__ import annotations
from PIL import Image
# Define glider example
__A = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__A = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowercase__ ( A_: list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
__UpperCAmelCase =[]
for i in range(len(A_ ) ):
__UpperCAmelCase =[]
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
__UpperCAmelCase =0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(A_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(A_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(A_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
__UpperCAmelCase =cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(A_ )
return next_generation
def lowercase__ ( A_: list[list[int]] , A_: int ) -> list[Image.Image]:
"""simple docstring"""
__UpperCAmelCase =[]
for _ in range(A_ ):
# Create output image
__UpperCAmelCase =Image.new("""RGB""" , (len(cells[0] ), len(A_ )) )
__UpperCAmelCase =img.load()
# Save cells to image
for x in range(len(A_ ) ):
for y in range(len(cells[0] ) ):
__UpperCAmelCase =255 - cells[y][x] * 255
__UpperCAmelCase =(colour, colour, colour)
# Save image
images.append(A_ )
__UpperCAmelCase =new_generation(A_ )
return images
if __name__ == "__main__":
__A = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
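# A quick sanity check for the update rule above (the first function is
# new_generation, as the call inside generate_images shows): a vertical
# blinker should flip to a horizontal bar after one step.
assert new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]]) == [
    [0, 0, 0],
    [1, 1, 1],
    [0, 0, 0],
]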
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def __A ( ):
"""simple docstring"""
__a = 10
__a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
__a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(_A ) ),
} , features=_A , )
return dataset
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=_A )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE : Tuple = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "file.txt"
__a = FILE_CONTENT
with open(_A , "w" ) as f:
f.write(_A )
return filename
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
    import bz2
    __a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
    __a = bytes(_A , "utf-8" )
    with bz2.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
__a = bytes(_A , "utf-8" )
with gzip.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        __a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
        __a = bytes(_A , "utf-8" )
        with lz4.frame.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        __a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
        with py7zr.SevenZipFile(_A , "w" ) as archive:
archive.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
import tarfile
__a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(_A , "w" ) as f:
f.add(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
import lzma
__a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
__a = bytes(_A , "utf-8" )
with lzma.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
import zipfile
__a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
__a = bytes(_A , "utf-8" )
with zstd.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "file.xml"
__a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(_A , "w" ) as f:
f.write(_A )
return filename
SCREAMING_SNAKE_CASE : Any = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
SCREAMING_SNAKE_CASE : Optional[int] = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
SCREAMING_SNAKE_CASE : Any = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE : int = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
SCREAMING_SNAKE_CASE : List[Any] = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="session" )
def __A ( ):
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = datasets.Dataset.from_dict(_A )
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(_A ) ) as con:
__a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(_A , "w" , newline="" ) as f:
__a = csv.DictWriter(_A , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(_A , "w" , newline="" ) as f:
__a = csv.DictWriter(_A , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
    import bz2
    __a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
    with open(_A , "rb" ) as f:
        __a = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(_A , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
__a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(_A , "wb" ) as f:
__a = pq.ParquetWriter(_A , schema=_A )
__a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_A ) )] for k in DATA[0]} , schema=_A )
writer.write_table(_A )
writer.close()
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
__a = {"data": DATA}
with open(_A , "w" ) as f:
json.dump(_A , _A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
__a = {"data": DATA_DICT_OF_LISTS}
with open(_A , "w" ) as f:
json.dump(_A , _A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(_A , "w" ) as f:
for item in DATA:
f.write(json.dumps(_A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(_A , "w" ) as f:
for item in DATA:
f.write(json.dumps(_A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(_A , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(_A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(_A , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(_A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(_A , "rb" ) as orig_file:
with gzip.open(_A , "wb" ) as zipped_file:
zipped_file.writelines(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(_A , "rb" ) as orig_file:
with gzip.open(_A , "wb" ) as zipped_file:
zipped_file.writelines(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.join("nested" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(_A , "w" ) as f:
f.add(_A , arcname=os.path.basename(_A ) )
f.add(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(_A , "w" ) as f:
f.add(_A , arcname=os.path.join("nested" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = ["0", "1", "2", "3"]
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(_A , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = ["0", "1", "2", "3"]
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(_A , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = ["0", "1", "2", "3"]
__a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(_A , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename("unsupported.ext" ) )
f.write(_A , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
__a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( ):
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __A ( ):
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
f.write(_A , arcname=os.path.basename(_A ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
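# A hedged sketch of consuming these session fixtures from a test module;
# pytest injects fixtures by function name, so this assumes the original
# (pre-mangling) fixture names such as csv_path.
def test_csv_dataset_has_four_rows(csv_path):
    ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
    assert ds.num_rows == 4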
"""simple docstring"""
import random
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : List[Any] ) -> tuple:
__a , __a , __a = [], [], []
for element in data:
if element < pivot:
less.append(lowerCAmelCase__ )
elif element > pivot:
greater.append(lowerCAmelCase__ )
else:
equal.append(lowerCAmelCase__ )
return less, equal, greater
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : int ) -> Union[str, Any]:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(lowerCAmelCase__ ) or index < 0:
return None
__a = items[random.randint(0 , len(lowerCAmelCase__ ) - 1 )]
__a = 0
__a , __a , __a = _partition(lowerCAmelCase__ , lowerCAmelCase__ )
__a = len(lowerCAmelCase__ )
__a = len(lowerCAmelCase__ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(lowerCAmelCase__ , lowerCAmelCase__ )
# must be in larger
else:
return quick_select(lowerCAmelCase__ , index - (m + count) )
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowercase_ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase_ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 65
| 1
|
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split a decoded summary into one sentence per line, as ROUGE-Lsum scoring expects."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 59
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str = "cpu" , __A : Union[str, None] = None ) -> None:
_SCREAMING_SNAKE_CASE = torch.load(__A , map_location=__A )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__A , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_SCREAMING_SNAKE_CASE = v.half()
if save_path is None: # overwrite src_path
_SCREAMING_SNAKE_CASE = src_path
torch.save(__A , __A )
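# Illustrative CLI invocation via fire (hypothetical file names, not part of the
# original script):
#     python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin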
if __name__ == "__main__":
fire.Fire(convert)
| 418
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    """Construct a BertGeneration tokenizer, backed by SentencePiece."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into a list of sub-word pieces."""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 706
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A =get_tests_dir("fixtures")
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = mock.Mock()
__UpperCAmelCase : int = 5_00
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : List[Any] = HTTPError
__UpperCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
__UpperCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=a_ ) as mock_head:
__UpperCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def snake_case__ ( cls : int ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = TOKEN
HfFolder.save_token(a_ )
@classmethod
def snake_case__ ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(a_ )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
__UpperCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
a_ , repo_id='''test-feature-extractor''' , push_to_hub=a_ , use_auth_token=self._token )
__UpperCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
def snake_case__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(a_ )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
__UpperCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
a_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=a_ , use_auth_token=self._token )
__UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
def snake_case__ ( self : List[str] ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCAmelCase : List[Any] = CustomFeatureExtractor.from_pretrained(a_ )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
__UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=a_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 241
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"
    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 546
|
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    """Greedily merge the two cheapest files at each step and return the total merge cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
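# Worked example (illustrative, not part of the original file): for files = [2, 3, 4]
# the cheapest pair (2, 3) merges first at cost 5, then (5, 4) merges at cost 9,
# so optimal_merge_pattern([2, 3, 4]) returns 5 + 9 = 14.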
if __name__ == "__main__":
import doctest
doctest.testmod()
| 546
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 99
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False
    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_x, self.target.pos_y, parent)
            )
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False
    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # each frontier chases the other's most recently expanded node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest
    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 99
| 1
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )
    model = Pix2StructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA model.")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 148
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2_048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
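# Illustrative usage (not part of the original module): instantiate the default
# base-sized configuration, overriding individual fields as keyword arguments:
#     config = ViTMAEConfig(mask_ratio=0.5)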
| 469
| 0
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used to instantiate the convert command from the parsed CLI arguments."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )
            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )
            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )
            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )
            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]"
            )
| 712
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )
            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 444
| 0
|
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
    import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )
    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written
    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
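# Illustrative round trip (hypothetical database and table names, not part of the
# original module):
#     import sqlite3
#     con = sqlite3.connect("data.db")
#     ds = SqlDatasetReader("SELECT * FROM samples", con).read()
#     SqlDatasetWriter(ds, "samples_copy", con).write()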
| 679
|
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"
    def __post_init__(self):
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"
    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)
        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)
            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))
            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))
            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))
            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))
            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)
        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)
        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)
        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"
        parser = HfArgumentParser(LiteralExample)
        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )
        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)
            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))
            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)
        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)
        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }
        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)
        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]
            args = BasicExample(**args_dict_for_json)
            self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)
        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
            args = BasicExample(**args_dict_for_yaml)
            self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 382
| 0
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
A : List[str] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of an
    image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 708
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_only_pretrain_model(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 282
| 0
|
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
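# Illustrative check (added sketch): with the 16 kHz default sampling rate, a
# 32000-sample (2 s) clip subsampled to max_length=1.0 s keeps exactly 16000
# samples, while a shorter clip is returned unchanged:
#
#     clip = np.zeros(32000)
#     assert len(random_subsample(clip, max_length=1.0)) == 16000
#     short = np.zeros(8000)
#     assert random_subsample(short, max_length=1.0) is short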
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 77
|
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
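# Worked example (added for illustration): for dims = (2, 3), flat index 5
# maps to the multi-index (1, 2), since 5 % 3 == 2 and 5 // 3 == 1:
#
#     assert _flat_idx_to_idx(5, (2, 3)) == (1, 2)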
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run `layer` over `inputs` in slices of at most `chunk_size` along the
    flattened batch dimensions, bounding peak memory use."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
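# Minimal usage sketch (added for illustration; the layer and shapes are
# arbitrary examples). A keyword-argument layer is run over the flattened
# (2, 8) batch grid four rows at a time, matching a full-batch call:
#
#     def double(x):
#         return x * 2
#
#     x = torch.randn(2, 8, 16)
#     out = chunk_layer(double, {"x": x}, chunk_size=4, no_batch_dims=2)
#     assert out.shape == (2, 8, 16)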
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 281
| 0
|
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """Return the lowest possible sum along a path from the top-left to the
    bottom-right of the grid, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
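# Worked example (added for illustration): for the grid
#     [[1, 3, 1],
#      [1, 5, 1],
#      [4, 2, 1]]
# the cheapest top-left to bottom-right path is 1 + 3 + 1 + 1 + 1 = 7, so
# min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) returns 7 (note that the
# grid is modified in place).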
if __name__ == "__main__":
import doctest
doctest.testmod()
| 524
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case : Optional[Any] = logging.get_logger(__name__)
_snake_case : Optional[Any] = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
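# Illustrative usage (added sketch): instantiating the configuration with the
# defaults above and overriding a single field.
#
#     configuration = VanConfig(drop_path_rate=0.1)
#     assert configuration.model_type == "van"
#     assert configuration.hidden_sizes == [64, 128, 320, 512]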
| 524
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 392
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b using only addition and bit shifts (Russian peasant method)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Compute (a * b) % modulus while keeping intermediate values small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
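# Worked example (added for illustration): 3 * 5 accumulates a = 3 for bit 0
# of b and a = 12 for bit 2, so binary_multiply(3, 5) == 15; with a modulus,
# binary_mod_multiply(3, 5, 7) == 15 % 7 == 1.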
| 392
| 1
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE = {"input_ids": [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # Bind the dict literal above to a readable name before it would
        # otherwise be shadowed by the sequences list below.
        expected_encoding = SCREAMING_SNAKE_CASE

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 705
|
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return the list of all primes up to and including num (sieve of Eratosthenes)."""
    if num <= 0:
        raise ValueError(f"{num}: Invalid input, please enter a positive integer.")

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 450
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 541
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Tuple = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
snake_case_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        class_ids = pipe.get_label_ids(words)
        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        class_ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
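# Usage sketch (added for illustration; needs the hub checkpoint and a CUDA
# device, just like the slow tests above):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])  # ImageNet label names -> class ids
#   image = pipe(class_ids, num_inference_steps=25, output_type="np").images[0]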
| 543
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue  # skip ids that do not decode to valid utf-8 on their own
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # The next four overrides disable common-mixin tests that do not apply to a
    # byte-level tokenizer; the original method names were lost in extraction, so
    # these names are best-guess reconstructions.
    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
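# Byte-level sketch (added for illustration; not the real PerceiverTokenizer):
# ids are raw UTF-8 byte values shifted past the special tokens, with [CLS]=4 and
# [SEP]=5 framing the sequence. The offset of 6 is inferred from the expected ids
# in test_multibytes_char ("U" == 85 -> 91).
def _byte_encode_sketch(text: str, offset: int = 6) -> list:
    return [4] + [b + offset for b in text.encode("utf-8")] + [5]


assert _byte_encode_sketch("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]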
| 543
| 1
|
def solution() -> int:
    """Return the number of Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
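    # Cross-check sketch (added for illustration): the stdlib datetime module can
    # count first-of-month Sundays over 1901-2000 directly.
    from datetime import date

    assert solution() == sum(
        date(year, month, 1).weekday() == 6 for year in range(1901, 2001) for month in range(1, 13)
    )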
| 59
|
'''simple docstring'''
def method_1(boundary: list, steps: int) -> float:
    # "extended trapezoidal rule": int(f) ~= h/2 * (f(a) + 2*f(x_1) + ... + 2*f(x_{n-1}) + f(b))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    x = a + h
    while x <= (b - h):  # the original "<" dropped the last interior point
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
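# Sanity-check sketch (added; uses method_1 and f as defined above): the exact
# integral of x^2 over [0, 1] is 1/3, so a 10-step estimate should sit within the
# trapezoidal rule's O(h^2) error of it.
def _sanity_check() -> None:
    assert abs(method_1([0.0, 1.0], 10) - 1 / 3) < 2e-3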
| 208
| 0
|
'''simple docstring'''
def excel_column_to_number(column_title: str) -> int:
    """
    Return the column number for an Excel-style column title.

    >>> excel_column_to_number("A")
    1
    >>> excel_column_to_number("AB")
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
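    # Round-trip sketch (added; the inverse helper is hypothetical): convert a
    # 1-based column number back to its title and compare.
    def number_to_column(n: int) -> str:
        title = ""
        while n > 0:
            n, rem = divmod(n - 1, 26)
            title = chr(65 + rem) + title
        return title

    assert number_to_column(excel_column_to_number("AB")) == "AB"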
| 712
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684
| 0
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
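# Cross-check sketch (added): create_state_space_tree prints one line per
# permutation, so the 4-element sequence above yields 4! = 24 lines — the same
# count itertools.permutations produces.
from itertools import permutations

assert len(list(permutations(sequence))) == 24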
| 21
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """The coefficients should be in order of degree, from smallest to largest."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
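if __name__ == "__main__":
    # Usage sketch (added for illustration): coefficients run from the constant
    # term upward, so Polynomial(2, [1, 2, 3]) represents 3x^2 + 2x + 1.
    p = Polynomial(2, [1, 2, 3])
    assert p.evaluate(2) == 17  # 3*4 + 2*2 + 1
    assert str(p.derivative()) == "6x + 2"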
| 311
| 0
|
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> str:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
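# Offset sketch (added; mirrors _convert_token_to_id above for non-special
# pieces): an spm piece id maps to fairseq id spm_id + 12, while ids 0-4 hold the
# special tokens and 5-14 the [unused] slots.
def _spm_to_fairseq(spm_id: int, fairseq_offset: int = 12) -> int:
    return spm_id + fairseq_offset


assert _spm_to_fairseq(3) == 15  # "," sits at spm id 3 and embedding position 15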
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
SCREAMING_SNAKE_CASE_ = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370
| 1
|
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
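# Usage sketch (added; the checkpoint id comes from the maps above, and loading
# it needs network or cache access):
#
#   tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tok.decode(generated_ids, truncate_before_pattern=[r"^#", r"^'''", r"^def"])
#
# decode() cuts the completion at a second top-level "print" or "def" and at the
# earliest match of any truncate_before_pattern, per truncate() above.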
| 369
|
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * ' '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| 369
| 1
|
'''simple docstring'''
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: O(n^2) over all pairs."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair inside the strip: each point is compared to at most 6 neighbours."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
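    # Brute-force cross-check sketch (added): an O(n^2) scan over the same points
    # must agree with the divide-and-conquer answer.
    from itertools import combinations

    brute = min(euclidean_distance_sqr(p, q) for p, q in combinations(points, 2)) ** 0.5
    assert abs(brute - closest_pair_of_points(points, len(points))) < 1e-9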
| 718
|
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the env var DATASETS_VERBOSITY is set to one of the valid choices, return that as the new default level;
    otherwise fall back to ``_default_log_level``.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the level for the library's root logger to INFO."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the level for the library's root logger to WARNING."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the level for the library's root logger to DEBUG."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the level for the library's root logger to ERROR."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
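# Usage sketch (added): the helpers above gate both log records and tqdm bars.
#
#   set_verbosity(log_levels["info"])        # or set_verbosity_info()
#   get_logger("datasets.builder").info("hello")
#   disable_progress_bar()                   # tqdm(...) now returns EmptyTqdm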
| 265
| 0
|
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
# fmt: off
_UpperCAmelCase : List[Any] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 244
|
'''simple docstring'''
def sum_of_proper_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of input_num."""
    # function name reconstructed; the original identifier was garbled
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
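    # Example (added): 28 is a perfect number, so its proper divisors sum to 28.
    assert sum_of_proper_divisors(28) == 28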
| 244
| 1
|
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_cast_image_with_optimized_list_casting(self):  # method name reconstructed; the original was garbled
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
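# Minimal round-trip sketch (added; same pattern as test_write): push two rows
# through an in-memory ArrowWriter, then re-read and validate via _check_output.
def _roundtrip_sketch():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    _check_output(output.getvalue(), expected_num_chunks=1)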
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    # unwrap nested list types down to the primitive element type
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
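

# A minimal usage sketch (not part of the original test suite): OptimizedTypedSequence
# picks a compact Arrow dtype for well-known tokenizer columns and falls back to
# int64 when a value does not fit, which is exactly what the test above asserts.
def _demo_optimized_typed_sequence():
    small = pa.array(OptimizedTypedSequence([0, 1, 1, 0], col="attention_mask"))
    assert small.type == pa.int8()  # attention masks fit in int8
    big = pa.array(OptimizedTypedSequence([0, 1, 300], col="attention_mask"))
    assert big.type == pa.int64()  # 300 overflows int8, so the writer falls back to int64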


@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    buf = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(buf)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
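# Note: the assertion above holds because ArrowWriter normalizes every field to
# nullable when it builds its writer, so a non-nullable inferred schema comes back
# as the equivalent nullable schema.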
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If it equals 2, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determines the importance of recall w.r.t. precision. Defaults to `2`.
    lowercase (bool): If `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, includes whitespace when extracting character n-grams. Defaults to `False`.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If it equals 2, the metric is referred to as chrF++,
    'beta' (int): Determines the importance of recall w.r.t. precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
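

# Illustration (helper name is hypothetical, not part of the metric): `_compute`
# above receives one sub-list of references per prediction and transposes it into
# sacrebleu's layout of one list per reference "column" before scoring.
def _transpose_references(references):
    references_per_prediction = len(references[0])
    return [[refs[i] for refs in references] for i in range(references_per_prediction)]


# e.g. [["ref A1", "ref A2"], ["ref B1", "ref B2"]]
#   -> [["ref A1", "ref B1"], ["ref A2", "ref B2"]]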
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
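

# Note: DummyObject turns this class into a placeholder that raises as soon as it is
# instantiated (or loaded via the classmethods above) in an environment where `torch`
# and `torchsde` are missing, instead of failing with an ImportError at import time.
# The exact error message comes from `requires_backends`.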
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
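

# Worked example of the naming scheme above (values are illustrative):
#   filename_prefix_for_split("CommonVoice", "train")
#       -> "common_voice-train"
#   filenames_for_dataset_split("/data", "CommonVoice", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
#       -> ["/data/common_voice-train-00000-of-00002.arrow",
#           "/data/common_voice-train-00001-of-00002.arrow"]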
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
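

# Worked example: tracing one original MAE checkpoint key through rename_key.
#   "blocks.0.attn.proj.weight"
#       -> "vit.encoder.layer.0.attn.proj.weight"                ("blocks" rule)
#       -> "vit.encoder.layer.0.attention.output.dense.weight"   ("attn.proj" rule)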


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights;
            # target key names follow the HF ViTMAE module layout
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : str = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit in uint16 iff the vocabulary has fewer than 2**16 entries
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict


@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=2_00, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
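

# A minimal sketch (an assumption about the implementation, not the library code) of
# the contiguous split strategy the expected values above encode: `num_shards` shards
# are divided into at most `max_num_jobs` contiguous ranges, with the remainder spread
# over the first ranges (e.g. 10 shards / 3 jobs -> sizes 4, 3, 3).
def _contiguous_ranges(num_shards, max_num_jobs):
    num_jobs = min(num_shards, max_num_jobs)
    if num_jobs == 0:
        return []
    base, extra = divmod(num_shards, num_jobs)
    ranges, start = [], 0
    for job in range(num_jobs):
        size = base + (1 if job < extra else 0)
        ranges.append(range(start, start + size))
        start += size
    return ranges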
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        # the first three checkpoints are loaded as complete pipelines; the fourth is
        # assembled from the components this pipeline was instantiated with
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker)

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
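

# Hedged usage sketch: assuming this class is exposed as a diffusers community
# pipeline (the custom_pipeline name below is an assumption), it would be loaded
# and run roughly like this:
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe(prompt="an astronaut riding a horse")
#   # output.images holds one image per v1.x checkpoint, in checkpoint order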
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
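

# For the default task, the `inputs` property above yields
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"}),
#                ("token_type_ids", {0: "batch", 1: "sequence"})])
# while the "multiple-choice" task inserts a middle "choice" axis:
#   {0: "batch", 1: "choice", 2: "sequence"}.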
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
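# Illustrative invocation (added; the script filename and output path are assumptions):
#
#     python convert_upernet_original_to_pytorch.py \
#         --model_name upernet-convnext-tiny \
#         --pytorch_dump_folder_path ./upernet-convnext-tiny \
#         --push_to_hub
#
# The script downloads the mmsegmentation checkpoint, remaps its weights into the
# Hugging Face UperNet layout, and verifies a logits slice before saving.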
| 614
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
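# Minimal illustration (added, not from the original module): instantiating the config
# with defaults while overriding one cross-modality setting.
config = LxmertConfig(x_layers=7)
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 7, 'language': 9}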
| 309
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
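# Hedged usage sketch (added; not from the original module). Assumes a local image file
# "photo.png"; the tool downloads the default VQA checkpoint on first use.
from PIL import Image

tool = ImageQuestionAnsweringTool()
image = Image.open("photo.png")
print(tool(image, "What is in the picture?"))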
| 309
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
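# Short illustration (added): like other PretrainedConfig subclasses, the config can be
# created with defaults or keyword overrides.
config = GPTNeoXJapaneseConfig(hidden_dropout=0.1)
print(config.model_type, config.vocab_size)  # gpt_neox_japanese 32000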
| 710
|
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Find the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
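# Added sanity check: per the Project Euler 50 statement, the longest sum of
# consecutive primes below one thousand that is itself prime is 953 (21 terms).
assert solution(1000) == 953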
| 525
| 0
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the wanted node count; -1 (the default) picks a random count from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the wanted node count; -1 (the default) picks a random count from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
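# Small usage sketch (added for illustration; node labels are arbitrary):
g = DirectedGraph()
g.add_pair(0, 1)
g.add_pair(1, 2)
g.add_pair(2, 0)
print(g.dfs())        # [0, 1, 2]
print(g.has_cycle())  # True: 0 -> 1 -> 2 -> 0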
| 75
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
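# Illustrative sketch (added): stopping criteria are callables over (input_ids, scores);
# a StoppingCriteriaList fires as soon as any member criterion does. The tensor shape
# below is arbitrary, and MaxLengthCriteria ignores the scores argument.
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=4)])
dummy_ids = torch.zeros((1, 4), dtype=torch.long)
print(criteria(dummy_ids, None))  # True: the sequence already has 4 tokens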
| 135
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
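# Brief illustration (added): the backwards-compatibility branch above maps the legacy
# "gated-gelu" value onto the "gelu_new" activation.
config = T5Config(feed_forward_proj="gated-gelu")
print(config.is_gated_act, config.dense_act_fn)  # True gelu_new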
| 178
| 0
|
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
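# Hedged usage sketch (added; the audio file path is an assumption, the CLAP model id is
# the usual checkpoint for this pipeline):
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
print(classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"]))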
| 5
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
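# Illustrative shell usage (added): with diffusers installed, the entry point above
# backs the `diffusers-cli` command, e.g.
#
#     diffusers-cli env
#
# which prints environment information for bug reports.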
| 65
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 437
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
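# Illustrative invocation (added; the checkpoint path is an assumption):
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path ./xmod.base/model.pt \
#         --pytorch_dump_folder_path ./xmod-base
#
# Add --classification_head to convert a checkpoint with an MNLI classification head.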
| 437
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """
    Test cases for the CircularLinkedList class.
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 163
|
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
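# Quick round-trip sketch (added; the keyword is arbitrary): enciphering and then
# deciphering recovers the upper-cased message, non-letters untouched.
cmap = create_cipher_map("Goodbye!!")
secret = encipher("Hello World!!", cmap)
assert decipher(secret, cmap) == "HELLO WORLD!!"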
| 163
| 1
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Estimates the functional correctness of generated code samples by executing them
against reference tests and reporting pass@k scores.
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0).
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and granular per-test results."""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
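
# Hedged worked example (added; not part of the original metric module). With
# n = 2 samples per task and c = [1, 2] correct, pass@1 per task is
# 1 - C(n - c, 1) / C(n, 1), i.e. 0.5 and 1.0, so the mean is 0.75.
def _demo_estimate_pass_at_k() -> None:
    scores = estimate_pass_at_k(np.array([2, 2]), np.array([1, 2]), 1)
    assert abs(scores.mean() - 0.75) < 1e-9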
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LUKE model."""

    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
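
# Hedged usage sketch (added; assumes the class above is importable as
# LukeConfig within transformers). Instantiates a config with defaults and one
# illustrative override.
def _demo_luke_config() -> None:
    config = LukeConfig(use_entity_aware_attention=False)
    assert config.model_type == "luke"
    assert config.entity_emb_size == 256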
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    """Construct a MBart50 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
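
# Hedged sketch (added; standalone toy values, no SentencePiece model needed):
# illustrates the fairseq/spm id alignment documented in __init__ above.
def _demo_fairseq_offset() -> None:
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    fairseq_offset = 1
    # In the spm vocab "," sits at id 3; shifting by the offset reproduces its
    # fairseq id 4, while the first four ids stay pinned to the table above.
    spm_id_comma = 3
    assert spm_id_comma + fairseq_offset == 4
    assert fairseq_tokens_to_ids["<pad>"] == 1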
"""Projected adaptive log-softmax, as used in Transformer-XL."""
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        """
        hidden :: [len*bsz x d_proj]; labels :: [len*bsz].
        If labels is None, returns log probabilities over the full vocabulary;
        otherwise returns the per-token negative log likelihood.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        """Computes log probabilities for all tokens, mirroring the tail/cluster logic of forward()."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # same cluster indexing as in forward()
                    logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
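
# Hedged usage sketch (added; the vocabulary size, embedding width, and cutoff
# values below are made-up example numbers). Builds a small adaptive softmax and
# checks that log_prob covers the full vocabulary and normalizes per row.
def _demo_projected_adaptive_log_softmax() -> None:
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=1)
    hidden = torch.randn(4, 32)
    logprob = crit.log_prob(hidden)
    assert logprob.shape == (4, 1000)
    # each row should (approximately) sum to 1 in probability space
    assert torch.allclose(logprob.exp().sum(dim=1), torch.ones(4), atol=1e-4)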
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that returns the wall-clock duration of ``func`` in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
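
# Hedged usage sketch (added; the feature spec is a made-up example). Generates
# a handful of dummy examples in memory without touching disk.
def _demo_generate_examples() -> None:
    features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
    dummy = generate_examples(features, num_examples=3)
    assert len(dummy) == 3
    first_idx, first_example = dummy[0]
    assert set(first_example) == {"text", "label"}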
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = jax_version = jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import math


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print octal conversions of a few sample numbers."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
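
# Hedged cross-check (added; not part of the original module): the digit-
# accumulation approach above should agree with Python's built-in oct().
def _demo_octal_matches_builtin() -> None:
    for n in (2, 8, 65, 216, 512):
        assert decimal_to_octal(n) == oct(n)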
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)
        controlnet = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device))
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))
        output = pipe(prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6)
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
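
# Hedged usage note (added; the test-file path below is an assumption about the
# repository layout, not stated in the source). The fast tests above can
# typically be run with pytest, e.g.:
#
#   python -m pytest tests/pipelines/controlnet/test_controlnet_img2img.py -k "FastTests"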
# Functions to print the upper and lower halves of a diamond (pyramid)


def floyd(n):
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive n."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
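
# Hedged usage sketch (added; not part of the original script): captures the
# diamond for n = 2 with the standard library and checks the first line.
def _demo_pretty_print() -> None:
    import contextlib
    import io

    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        pretty_print(2)
    assert buffer.getvalue().splitlines()[0] == " * "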
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
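
# Hedged usage note (added; the script filename below is an assumption, since
# the source does not state it):
#
#   python convert_musicgen_transformers.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu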
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=__a , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=__a , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=__a , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=__a , default="""data/dump""" , help="""The dump file prefix.""" )
UpperCamelCase__ = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
UpperCamelCase__ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCamelCase__ = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
UpperCamelCase__ = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCamelCase__ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCamelCase__ = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
UpperCamelCase__ = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCamelCase__ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCamelCase__ = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
UpperCamelCase__ = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
UpperCamelCase__ = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f"{len(__a )} examples to process." )
UpperCamelCase__ = []
UpperCamelCase__ = 0
UpperCamelCase__ = 10_000
UpperCamelCase__ = time.time()
for text in data:
UpperCamelCase__ = f"{bos} {text.strip()} {sep}"
UpperCamelCase__ = tokenizer.encode(__a , add_special_tokens=__a )
rslt.append(__a )
iter += 1
if iter % interval == 0:
UpperCamelCase__ = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
UpperCamelCase__ = time.time()
logger.info("""Finished binarization""" )
logger.info(f"{len(__a )} examples processed." )
UpperCamelCase__ = f"{args.dump_file}.{args.tokenizer_name}.pickle"
UpperCamelCase__ = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCamelCase__ = [np.uintaa(__a ) for d in rslt]
else:
UpperCamelCase__ = [np.intaa(__a ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"Dump to {dp_file}" )
with open(__a , """wb""" ) as handle:
pickle.dump(rslt_ , __a , protocol=pickle.HIGHEST_PROTOCOL )
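

# Hedged sketch (not part of the original script): how a consumer could read the
# binarized dump back. The default path assumes the --dump_file/--tokenizer_name
# defaults above.
def load_binarized(path: str = "data/dump.bert-base-uncased.pickle"):
    with open(path, "rb") as handle:
        return pickle.load(handle)  # list of np.uint16 / np.int32 token-id arrays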
if __name__ == "__main__":
main()
| 513
| 0
|
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
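

# Hedged usage sketch (assumption: residue_constants exposes atom_type_num, as in
# AlphaFold-style pipelines). Builds a minimal one-residue Protein and renders it
# as PDB text with to_pdb.
def _example_protein_to_pdb() -> str:
    num_res = 1
    num_atoms = residue_constants.atom_type_num
    prot = Protein(
        atom_positions=np.zeros((num_res, num_atoms, 3)),
        aatype=np.zeros((num_res,), dtype=np.int32),
        atom_mask=np.ones((num_res, num_atoms)),
        residue_index=np.arange(1, num_res + 1),
        b_factors=np.zeros((num_res, num_atoms)),
    )
    return to_pdb(prot)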
| 721
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
]
    functions_shuffled = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
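
# Sketch of the output (an assumption derived from the loops above, not a
# documented format): the JSON maps each benchmarked call -- function name plus
# kwarg values -- to its wall-clock duration in seconds, e.g.
#   {"num examples": 50000, "read 5000": 1.23, "read_batch 50000 10": ...}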
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 409
| 0
|
"""Convert between astronomical length units (metre through yottametre)."""
UNIT_SYMBOL = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
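
    # Worked example (follows from the exponent table above): 4 km -> Mm uses
    # exponents 3 and 6, so the factor is 10 ** (3 - 6).
    print(length_conversion(4, "kilometer", "megametre"))  # 0.004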
| 90
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
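

# Hedged usage sketch: field names mirror the __init__ signature above; the
# defaults are assumed to match the uw-madison/mra-base-512-4 checkpoint.
# config = MraConfig(num_hidden_layers=6)
# config.block_per_row  # -> 4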
| 359
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
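

# Hedged sketch (not part of the test file): a user-defined callback that stops
# training early by flipping the public TrainerControl flag
# `should_training_stop`, the same mechanism the tests above exercise.
class StopAfterNStepsCallback(TrainerCallback):
    def __init__(self, max_steps=10):
        self.max_steps = max_steps

    def on_step_end(self, args, state, control, **kwargs):
        if state.global_step >= self.max_steps:
            control.should_training_stop = True
        return control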
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
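

# To run only the slow integration test above (standard transformers test usage;
# the file path is an assumption about the repo layout):
#   RUN_SLOW=1 python -m pytest -k FlaxAlbertModelIntegrationTest tests/models/albert/test_modeling_flax_albert.py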
| 645
| 1
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all k-length combinations of the numbers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
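
    # Worked example: n=4, k=2 prints the C(4, 2) = 6 combinations
    # 1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4.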
| 383
|
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
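

# Hedged usage sketch (the checkpoint name is a real hub id; loading follows
# standard from_pretrained usage):
# model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")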
| 187
| 0
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the `DATASETS_VERBOSITY` env var is set to one of the valid choices, return that as the new default level.
    Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to `INFO`."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to `WARNING`."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to `DEBUG`."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to `ERROR`."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs. Note that log propagation is disabled by default."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
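

# Hedged usage sketch (all names are defined above): raise verbosity while
# debugging, and silence tqdm bars in batch jobs.
# set_verbosity_debug()
# disable_progress_bar()
# get_logger("datasets.builder").info("visible once verbosity is DEBUG/INFO")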
| 196
|
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"

_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"

_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Comet(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 196
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    # override to speed up the overall test timing
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    # override to speed up the overall test timing
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
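

# Hedged usage sketch outside the test harness (the model id is a real hub id;
# the call pattern mirrors the tests above):
# pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-2-base", safety_checker=None
# ).to("cuda")
# image = pipe("a photo of the dolomites").images[0]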
| 71
|
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
SCREAMING_SNAKE_CASE__:Union[str, Any] = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE__:List[str] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
SCREAMING_SNAKE_CASE__:Union[str, Any] = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
SCREAMING_SNAKE_CASE__:Optional[int] = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
SCREAMING_SNAKE_CASE__:Optional[Any] = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
SCREAMING_SNAKE_CASE__:int = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    """Generate a dataframe flagging PT/TF/Flax support for each model type."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the table mapping model classes to (pipeline_tag, auto_class), keeping stale keys."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check that every pipeline task registered in Transformers appears in `PIPELINE_TAGS_AND_AUTO_MODELS`."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[str] = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
SCREAMING_SNAKE_CASE__:Any = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
|
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (given as a name or an already-parsed Version) to a required version."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed version of torch to a reference version with an operation."""
    return compare_versions(torch_version, operation, version)
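# Usage sketch (assuming `STR_OPERATION_TO_FUNC` maps comparison strings such as ">=" to the
# matching `operator` functions, e.g. ">=" -> operator.ge):
#   is_torch_version(">=", "1.12.0")        # True when the installed torch is at least 1.12
#   compare_versions("numpy", ">", "1.20")  # compare any installed library by name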
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
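# Usage sketch: this formatter is what backs `Dataset.with_format("jax")` in `datasets`,
# e.g. `ds = ds.with_format("jax")` makes `ds[0]` return a dict of `jax.Array`s produced by
# `format_row` above (device placement follows the `device` string given at creation time).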
|
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
A_ = "<<<<<<< This should probably be modified because it mentions: "
A_ = "=======\n>>>>>>>\n"
A_ = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
A_ = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
def _UpperCamelCase ( A ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Remove non-printing characters, normalize whitespace and apply NFC Unicode normalization."""
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string, keeping special tokens intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """
        Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
        functionality but is often much faster.
        """
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids to text using the raw SP tokenizer."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
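# Minimal usage sketch (model id taken from the vocab map above; network access assumed):
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer.encode_fast("Svenska är kul!")
#   text = tokenizer.decode_fast(ids)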
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
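# Illustrative: the attribute_map above aliases generic names to MVP-specific ones, so
#   config = MvpConfig()
#   config.hidden_size           # -> 1024 (resolves to config.d_model)
#   config.num_attention_heads   # -> 16   (resolves to config.encoder_attention_heads)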
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Compute the Jaccard similarity of two collections (sets, or lists/tuples treated as ordered
    collections). With `alternative_union=True`, the denominator is len(set_a) + len(set_b)
    instead of the size of the true union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
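# Worked example with the demo sets below: the intersection {"c", "d", "e"} has 3 elements and
# the union has 8, so jaccard_similarity(set_a, set_b) == 3 / 8 == 0.375.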
if __name__ == "__main__":
_snake_case : Dict = {'a', 'b', 'c', 'd', 'e'}
_snake_case : Dict = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
|
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split the text into sentences and re-join them separated by newlines (needed for ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law (V = I * R): pass exactly one of the three quantities as 0 and the function
    returns that missing quantity computed from the other two.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
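# Examples:
#   ohms_law(voltage=10, current=0, resistance=5)  -> {"current": 2.0}
#   ohms_law(voltage=0, current=2, resistance=4)   -> {"voltage": 8.0}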
if __name__ == "__main__":
import doctest
doctest.testmod()
|
def solution(power: int = 1000) -> int:
    """Project Euler problem 16: return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
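# Sanity check: solution(15) == 26, since 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.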
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers (PP-only) naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: layer files are offset by 3 (embeddings and their layernorm come first)
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
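# For instance, the key "input_layernorm.weight" read from a file named
# "layer_05-model_00-model_states.pt" maps to "h.2.input_layernorm.weight" (layer 5 minus the offset of 3).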
def get_dtype_size(dtype):
    """Return the size in bytes of a single element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
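# e.g. get_dtype_size(torch.float16) -> 2 and get_dtype_size(torch.int64) -> 8, since the regex
# extracts the trailing bit width from the dtype's string form ("torch.float16" -> 16).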
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of the original state dict to the transformers naming scheme."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    """Download the standard COCO cats image used to verify vision model conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert a number of bytes to megabytes."""
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that reports the GPU memory allocated and the peak GPU memory used inside its block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence containing only the values in ``colors`` in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
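# A quick sanity check (hypothetical session) for the single-pass partition above:
#   >>> dutch_national_flag_sort([2, 0, 1, 2, 0, 1])
#   [0, 0, 1, 1, 2, 2]
#   >>> dutch_national_flag_sort([])
#   []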
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 504
| 0
|
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
| 284
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
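# Example (hypothetical session): the default conv strides multiply out to an
# overall 320x downsampling from input samples to logits.
#   >>> config = SEWConfig()
#   >>> config.inputs_to_logits_ratio
#   320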
| 363
| 0
|
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
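# Example round trip (hypothetical session) using the helpers above:
#   >>> encrypt("SOS")
#   '... --- ...'
#   >>> decrypt("... --- ...")
#   'SOS'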
| 243
|
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 243
| 1
|
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in ``n``."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f'{solution() = }')
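# Sanity check on the reduce step above (hypothetical session): reduce multiplies
# the digits of each window, e.g. on a 4-character window
#   >>> int(reduce(lambda x, y: str(int(x) * int(y)), "9989"))
#   5832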
| 2
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
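# Example invocation (hypothetical output path):
#   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#       --pytorch_dump_folder_path ./donut-base-converted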
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 615
| 0
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10


@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase_ = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase_,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_lang_tokens(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 708
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 40_96,
"""google/bigbird-roberta-large""": 40_96,
"""google/bigbird-base-trivia-itc""": 40_96,
}
SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>",
                 pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Build model inputs by adding [CLS]/[SEP] special tokens to a sequence or pair.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        # Return a mask with 1 for special tokens and 0 for sequence tokens.
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Create token type ids (segment ids) for a sequence or a sequence pair.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
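# Example (hypothetical session): layout of the special tokens added above.
# The token ids shown are illustrative only; real values depend on the vocabulary.
#   >>> tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   >>> tok.build_inputs_with_special_tokens([5, 6], [7])
#   [65, 5, 6, 66, 7, 66]   # i.e. [CLS] A [SEP] B [SEP]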
| 559
| 0
|
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A weighted, undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Find the maximum saving obtainable by replacing the network with a
    minimum spanning tree (Project Euler problem 107)."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}
    data: list[str]

    with open(filepath) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
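# A tiny worked example (hypothetical session) of the saving computed above:
#   >>> g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#   >>> mst = g.prims_algorithm()
#   >>> sum(g.edges.values()) - sum(mst.edges.values())
#   3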
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85
|
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85
| 1
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image classification pipeline using any model with an image-classification head."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 484
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
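# With this lazy-module pattern, importing the package is cheap: submodules such
# as `modeling_clipseg` are only loaded when an attribute is first accessed, e.g.
# (hypothetical environment with torch installed):
#   from transformers.models.clipseg import CLIPSegProcessor, CLIPSegModel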
| 484
| 1
|