import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
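
# Usage sketch (added for illustration, not part of the original example): the
# `compute_metrics` hook above reduces logits to class ids with `np.argmax`,
# and the "xnli" metric then reports plain accuracy. Values here are made up.
def _compute_metrics_demo():
    logits = np.array([[0.1, 0.8, 0.1], [0.9, 0.05, 0.05], [0.2, 0.3, 0.5]])
    labels = np.array([1, 0, 2])
    preds = np.argmax(logits, axis=1)  # -> array([1, 0, 2])
    return {"accuracy": float((preds == labels).mean())}  # -> {"accuracy": 1.0}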
def solution(n: int = 600_851_475_143) -> int:
    """Returns the largest prime factor of a given number n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
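
# Sanity checks (illustrative addition): 13195 = 5 * 7 * 13 * 29, so its largest
# prime factor is 29, and a prime input is its own largest prime factor.
if __name__ == "__main__":
    assert solution(13195) == 29
    assert solution(17) == 17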
"""
Simulate the evolution of a highway with a single looping lane, following the
Nagel--Schreckenberg cellular automaton model. Each cell holds -1 (empty) or
the speed of the car occupying it.
"""
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
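
# Usage sketch (illustrative addition): a 30-cell loop with a car every 3 cells
# starting at speed 1, evolved for 5 steps with a 10% braking probability and a
# speed cap of 5. Each printed row is one time step; -1 marks an empty cell.
if __name__ == "__main__":
    initial = construct_highway(number_of_cells=30, frequency=3, initial_speed=1)
    for row in simulate(initial, number_of_update=5, probability=0.1, max_speed=5):
        print(row)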
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal if (1 + sqrt(1 + 24 n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Find the smallest difference of a pair of pentagonal numbers whose sum
    and difference are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
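
# Sanity check (illustrative addition): the first pentagonal numbers are
# 1, 5, 12, 22, 35, ..., so `is_pentagonal` should accept them and reject 6.
if __name__ == "__main__":
    assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35))
    assert not is_pentagonal(6)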
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """
    Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
    """
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-focused hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
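
# Usage sketch (illustrative addition, assuming numpy is installed): the
# requirement string uses the same specifier syntax as pip, including
# comma-separated ranges. A failed comparison raises ImportError, and a
# missing package raises importlib.metadata.PackageNotFoundError.
if __name__ == "__main__":
    require_version("numpy")             # any installed version passes
    require_version("numpy>=1.17,<3.0")  # multiple constraints in one string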
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    r"""Configuration class to store the configuration of a Swin model."""

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
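
# Usage sketch (illustrative addition, following the standard transformers
# config pattern; `SwinModel` lives outside this module): a default
# configuration yields hidden_size = 96 * 2 ** 3 = 768 for the four-stage
# depths above.
#
#     from transformers import SwinConfig, SwinModel
#     config = SwinConfig()
#     model = SwinModel(config)
#     print(config.hidden_size)  # 768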
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Construct a SpeechT5 tokenizer, based on a character-level SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
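
# Usage sketch (illustrative addition): loading the character-level tokenizer
# from one of the checkpoints listed in PRETRAINED_VOCAB_FILES_MAP above.
#
#     from transformers import SpeechT5Tokenizer
#     tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#     ids = tokenizer("hello world").input_ids  # character-level ids + </s>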
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('**Test `accumulate` gradient accumulation with dataloader break**')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('**Test NOOP `no_sync` context manager**')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('**Test Distributed `no_sync` context manager**')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('<', '2.0') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                '`split_batches=False`, `dispatch_batches=False`**',
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
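
# Minimal sketch (illustrative, not part of the test script): the pattern these
# tests exercise by hand — skipping gradient synchronization on accumulation
# steps with `accelerator.no_sync(model)` and syncing only on the last
# micro-batch. `compute_loss` is a hypothetical helper.
#
#     for i, batch in enumerate(dataloader):
#         if (i + 1) % accumulation_steps != 0:
#             with accelerator.no_sync(model):
#                 accelerator.backward(compute_loss(model, batch))
#         else:
#             accelerator.backward(compute_loss(model, batch))
#             optimizer.step()
#             optimizer.zero_grad()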
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
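# Usage sketch for the hooks above: once attached, every forward call sees its
# first argument incremented by 1 before the wrapped forward runs.
#
#   model = ModelForTest()
#   add_hook_to_module(model, PreForwardHook())
#   y = model(torch.randn(2, 3))  # same as calling the bare model on x + 1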
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
    'configuration_data2vec_text': [
        'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Data2VecTextConfig',
        'Data2VecTextOnnxConfig',
    ],
    'configuration_data2vec_vision': [
        'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Data2VecVisionConfig',
        'Data2VecVisionOnnxConfig',
    ],
}
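# `_import_structure` drives the `_LazyModule` at the bottom of this file:
# submodules listed here are only imported when one of their attributes is
# first accessed, which keeps the top-level `import transformers` cheap.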
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
        'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecAudioForAudioFrameClassification',
        'Data2VecAudioForCTC',
        'Data2VecAudioForSequenceClassification',
        'Data2VecAudioForXVector',
        'Data2VecAudioModel',
        'Data2VecAudioPreTrainedModel',
    ]
    _import_structure['modeling_data2vec_text'] = [
        'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecTextForCausalLM',
        'Data2VecTextForMaskedLM',
        'Data2VecTextForMultipleChoice',
        'Data2VecTextForQuestionAnswering',
        'Data2VecTextForSequenceClassification',
        'Data2VecTextForTokenClassification',
        'Data2VecTextModel',
        'Data2VecTextPreTrainedModel',
    ]
    _import_structure['modeling_data2vec_vision'] = [
        'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecVisionForImageClassification',
        'Data2VecVisionForMaskedImageModeling',
        'Data2VecVisionForSemanticSegmentation',
        'Data2VecVisionModel',
        'Data2VecVisionPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_data2vec_vision'] = [
        'TFData2VecVisionForImageClassification',
        'TFData2VecVisionForSemanticSegmentation',
        'TFData2VecVisionModel',
        'TFData2VecVisionPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny', 'prajjwal1/bert-tiny')
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='train[:1%]')
        val_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='validation[:1%]')

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'], padding='max_length', truncation=True, max_length=512)
            outputs = tokenizer(batch['highlights'], padding='max_length', truncation=True, max_length=128)
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask

            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=['article', 'highlights'],
        )
        train_dataset.set_format(
            type='torch',
            columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=['article', 'highlights'],
        )
        val_dataset.set_format(
            type='torch',
            columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy='steps',
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
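        # Note: with evaluation_strategy='steps' and eval_steps=2, evaluation
        # (and therefore `_compute_metrics`) runs every 2 optimizer steps
        # during this short smoke-test training run.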
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        """Configuration class for the Salesforce CTRL model."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
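# Usage sketch: with the defaults above, `CTRLConfig().hidden_size` resolves to
# `n_embd` (1280) through `attribute_map`.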
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term with `n` digits."""
    return fibonacci_digits_index(n)
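# Example: the first Fibonacci term with 3 digits is F(12) = 144 under this
# indexing, so fibonacci_digits_index(3) == 12 and solution(3) == 12.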
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
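# Example: patience_sort([24, 5, 1, 11]) -> [1, 5, 11, 24]; sorting happens
# in place and the same list object is returned.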
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed `n`."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
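# Example: with n = 10 the even Fibonacci terms are 0, 2 and 8, so
# solution(10) == 10.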
if __name__ == "__main__":
print(f"""{solution() = }""")
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
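# Launch sketch (the task name and paths below are hypothetical):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./data --output_dir ./out --do_train --do_eval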
if __name__ == "__main__":
main()
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('roberta-base', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
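# Run sketch (the test file path is an assumption based on the usual
# Transformers repository layout):
#   pytest tests/models/roberta/test_modeling_flax_roberta.py -k model_from_pretrained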
'''simple docstring'''
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of all digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in `n` with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
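# Known reference point (Project Euler 8): for a window of four adjacent digits
# the answer is 9 * 9 * 8 * 9 = 5832; solution() computes the 13-digit case.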
if __name__ == "__main__":
print(F'{solution() = }')
'''simple docstring'''
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)
class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)
class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)
class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)
class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)
class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)
class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
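# Each test above delegates to UNetBlockTesterMixin.test_output, which (in the
# diffusers test suite) builds the block from prepare_init_args_and_inputs_for_common(),
# runs a forward pass on a fixed dummy input, and compares a flattened slice of
# the output against expected_slice.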
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : Union[str, Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase : Optional[Any] = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase : List[str] = False
UpperCamelCase : List[str] = False
UpperCamelCase : Any = False
UpperCamelCase : List[Any] = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFDeiTModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=3_7 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , tf.keras.layers.Dense ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__magic_name__ )
_lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __magic_name__ )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher, which takes no labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
|
"""simple docstring"""
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
_validate_point(__lowerCamelCase )
_validate_point(__lowerCamelCase )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(a - b ) for a, b in zip(__lowerCamelCase, __lowerCamelCase ) ) )
def A__ ( __lowerCamelCase ):
"""simple docstring"""
if point:
if isinstance(__lowerCamelCase, __lowerCamelCase ):
for item in point:
if not isinstance(__lowerCamelCase, (int, float) ):
_lowerCAmelCase = (
'Expected a list of numbers as input, found '
F'''{type(__lowerCamelCase ).__name__}'''
)
raise TypeError(__lowerCamelCase )
else:
_lowerCAmelCase = F'''Expected a list of numbers as input, found {type(__lowerCamelCase ).__name__}'''
raise TypeError(__lowerCamelCase )
else:
raise ValueError('Missing an input' )
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
_validate_point(__lowerCamelCase )
_validate_point(__lowerCamelCase )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(x - y ) for x, y in zip(__lowerCamelCase, __lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
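# Example (illustrative): both variants agree on the same inputs, e.g.
# manhattan_distance([1, 1], [9, 9]) == manhattan_distance_one_liner([1, 1], [9, 9]) == 16.0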
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
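# Usage sketch (illustrative; the function names above are reconstructed):
# device = get_device()
# freeze_module(model)  # e.g. freeze a text encoder before training
# print(f"[{get_timestamp()}] running on {device}")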
|
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (a positive integer whose only prime factors are 2, 3 and 5),
    using the classic three-pointer merge over the sequence built so far."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
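# The ugly-number sequence begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ..., so ugly_numbers(10) == 12.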
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
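# Usage sketch (illustrative, not from the original file):
# config = LongformerConfig(attention_window=256)
# onnx_config = LongformerOnnxConfig(config)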
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
|
def power(base: int, exponent: int) -> float:
    """Computes base**exponent for non-negative exponents via recursion."""
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
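# A sketch of a variant that also handles negative exponents directly
# (illustrative, not part of the original script):
def signed_power(base: float, exponent: int) -> float:
    if exponent < 0:
        return 1 / signed_power(base, -exponent)
    return base * signed_power(base, exponent - 1) if exponent else 1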
|
def binomial_coefficient(n: int, r: int) -> int:
    """Computes C(n, r) using a single rolling row of Pascal's triangle."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
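# C(10, 5) == 252, so the call above prints 252.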
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase : str ) -> list[int]:
return [ord(_lowerCamelCase ) - 96 for elem in plain]
def lowerCamelCase__ ( _lowerCamelCase : list[int] ) -> str:
return "".join(chr(elem + 96 ) for elem in encoded )
def lowerCamelCase__ ( ) -> None:
lowerCamelCase_ = encode(input('-> ' ).strip().lower() )
print('Encoded: ' , _lowerCamelCase )
print('Decoded:' , decode(_lowerCamelCase ) )
if __name__ == "__main__":
main()
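# Example: encode("hello") == [8, 5, 12, 12, 15] and decode([8, 5, 12, 12, 15]) == "hello".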
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
|
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : list[int] , __lowerCamelCase : int ) -> bool:
_snake_case = len(__lowerCamelCase )
_snake_case = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
_snake_case = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
_snake_case = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
_snake_case = subset[i - 1][j]
if arr[i - 1] <= j:
_snake_case = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
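# Example: is_sum_subset([2, 4, 6, 8], 5) is False (all elements are even),
# while is_sum_subset([2, 4, 6, 8], 14) is True (6 + 8 == 14).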
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)

    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    # the reference IoU scores below are specific to the sam_vit_h_4b8939 checkpoint
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
|
"""simple docstring"""
import enum
import shutil
import sys
snake_case , snake_case = shutil.get_terminal_size()
snake_case = {'''UP''': '''A''', '''DOWN''': '''B''', '''RIGHT''': '''C''', '''LEFT''': '''D'''}
class UpperCAmelCase ( enum.Enum ):
A__ : List[Any] = 0
A__ : List[str] = 1
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_="" ) -> List[Any]:
sys.stdout.write(str(lowerCAmelCase_ ) + end )
sys.stdout.flush()
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="" ) -> List[Any]:
forceWrite(f"""\u001b[{color}m{content}\u001b[0m""" , lowerCAmelCase_ )
def snake_case ( ) -> Dict:
forceWrite('''\r''' )
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def snake_case ( ) -> Tuple:
forceWrite(''' ''' * TERMINAL_WIDTH )
reset_cursor()
def snake_case ( ) -> int:
reset_cursor()
forceWrite('''-''' * TERMINAL_WIDTH )
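# Usage sketch (illustrative): redraw a one-line status in place
# forceWrite("working...")
# clear_line()
# writeColor("done", 32, "\n")  # 32 is the ANSI code for green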
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCAmelCase ( yaml.SafeLoader ):
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ):
"""simple docstring"""
_snake_case = [self.constructed_objects[key_node] for key_node, _ in node.value]
_snake_case = [tuple(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else key for key in keys]
_snake_case = Counter(__lowerCamelCase )
_snake_case = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""" )
def __UpperCAmelCase ( self : str , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=False ):
"""simple docstring"""
_snake_case = super().construct_mapping(__lowerCamelCase , deep=__lowerCamelCase )
self._check_no_duplicates_on_constructed_node(__lowerCamelCase )
return mapping
def snake_case ( lowerCAmelCase_ ) -> Tuple[Optional[str], str]:
_snake_case = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
_snake_case = full_content[1:].index('''---''' ) + 1
_snake_case = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCAmelCase_ )
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)"""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string"""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
snake_case = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
snake_case = ap.parse_args()
snake_case = Path(args.readme_filepath)
snake_case = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
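# Usage sketch (illustrative): round-trip the YAML metadata block of a dataset card
# metadata = DatasetMetadata.from_readme(Path("README.md"))
# metadata["pretty_name"] = "My Dataset"
# metadata.to_readme(Path("README.md"))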
|
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Counts the "triangle words" in words.txt: words whose alphabetical value
    (A=1, ..., Z=26, summed over the letters) is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
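# Example from the problem statement: "SKY" has word value 19 + 11 + 25 = 55,
# which is the 10th triangular number, so it counts as a triangle word.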
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Params
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """
    Get the images list and annotations list from the input dirs, flip them,
    and save the new images and annotations in the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Reads the .txt annotation files in label_dir and pairs each with its image."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
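# Note: annotations are assumed to be in YOLO format ("class x_center y_center width height",
# with coordinates normalized to [0, 1]), which is why a horizontal flip maps x_center to
# 1 - x_center and a vertical flip maps y_center to 1 - y_center.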
def random_chars(number_char: int = 32) -> str:
    """Generates a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , UpperCAmelCase_)
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Any = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
snake_case__ : Optional[int] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case__ : List[str] = F'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCAmelCase_)
snake_case__ : int = fsspec.filesystem(compression_fs_class.protocol , fo=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
snake_case__ : Optional[int] = os.path.basename(UpperCAmelCase_)
snake_case__ : str = expected_filename[: expected_filename.rindex(""".""")]
assert fs.glob("""*""") == [expected_filename]
with fs.open(UpperCAmelCase_ , """r""" , encoding="""utf-8""") as f, open(UpperCAmelCase_ , encoding="""utf-8""") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""])
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Tuple = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
snake_case__ : List[str] = compressed_file_paths[protocol]
snake_case__ : Dict = """dataset.jsonl"""
snake_case__ : List[str] = F'{protocol}://{member_file_path}::{compressed_file_path}'
snake_case__ , *snake_case__ : Union[str, Any] = fsspec.get_fs_token_paths(UpperCAmelCase_)
assert fs.isfile(UpperCAmelCase_)
assert not fs.isfile("""non_existing_""" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
|
def solution(limit=28123):
    """
    Project Euler 23: returns the sum of all positive integers that cannot be
    written as the sum of two abundant numbers.
    """
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
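# Context: 12 is the smallest abundant number, so 24 is the smallest integer expressible as
# the sum of two abundant numbers; per the problem statement, every integer greater than
# 28123 can be written as such a sum, which is why the search is capped at 28123.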
|
"""simple docstring"""
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty" )
UpperCamelCase = sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
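# Example: mean_absolute_deviation([1, 2, 3, 4]) == 1.0
# (the average is 2.5; deviations 1.5 + 0.5 + 0.5 + 1.5 sum to 4.0 over 4 values).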
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = "upernet"
def __init__(self , __a=None , __a=5_12 , __a=0.02 , __a=[1, 2, 3, 6] , __a=True , __a=0.4 , __a=3_84 , __a=2_56 , __a=1 , __a=False , __a=2_55 , **__a , ) -> Tuple:
super().__init__(**__a )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCamelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(__a , __a ):
UpperCamelCase = backbone_config.get("model_type" )
UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase = config_class.from_dict(__a )
UpperCamelCase = backbone_config
UpperCamelCase = hidden_size
UpperCamelCase = initializer_range
UpperCamelCase = pool_scales
UpperCamelCase = use_auxiliary_head
UpperCamelCase = auxiliary_loss_weight
UpperCamelCase = auxiliary_in_channels
UpperCamelCase = auxiliary_channels
UpperCamelCase = auxiliary_num_convs
UpperCamelCase = auxiliary_concat_input
UpperCamelCase = loss_ignore_index
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = copy.deepcopy(self.__dict__ )
UpperCamelCase = self.backbone_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case
def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """simple docstring"""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """simple docstring"""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
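# Usage sketch (hedged: illustrative, e.g. launching a distributed test from pytest;
# the script name "train.py" is hypothetical):
#
#     cmd = [sys.executable, "-m", "torch.distributed.run",
#            f"--master_port={get_torch_dist_unique_port()}", "train.py"]
#     result = execute_subprocess_async(cmd, env=os.environ.copy())
#     assert result.returncode == 0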
| 449
| 1
|
"""simple docstring"""
def longest_common_subsequence(x: str, y: str):
    """simple docstring"""
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    assert (expected_ln, expected_subseq) == (ln, subseq)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
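# Worked check (hedged: traced by hand from the DP recurrence above):
# for x = "AGGTAB", y = "GXTXAYB" the table gives l[m][n] == 4 and the
# backtracking loop reconstructs "GTAB"; time and space are both O(m * n).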
| 20
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
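# Usage sketch (hedged: assumes the standard transformers config API):
#
#     config = SegformerConfig(num_labels=150)
#     onnx_config = SegformerOnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', ...})])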
| 20
| 1
|
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
| 658
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """simple docstring"""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
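# Worked example (hedged: illustrative numbers, rounded):
# principal = 25_000, rate_per_annum = 0.12, years_to_repay = 3
#   rate_per_month = 0.01, number_of_payments = 36
#   EMI = 25000 * 0.01 * 1.01**36 / (1.01**36 - 1) ≈ 830.36
#
#     print(equated_monthly_installments(25_000, 0.12, 3))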
| 605
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
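# Usage sketch (hedged: standard config instantiation):
#
#     config = SwitchTransformersConfig(num_experts=16, expert_capacity=32)
#     print(config.encoder_sparse_step)  # 4 with the default 12 layers / 3 sparse layers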
| 293
|
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(lambda x: round(x))

    def __init__(self, encrypt_key):
        """simple docstring"""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """simple docstring"""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """simple docstring"""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """simple docstring"""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """simple docstring"""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """simple docstring"""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self):
        """simple docstring"""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """simple docstring"""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
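# Usage sketch (hedged: [[2, 5], [1, 6]] has determinant 7, which is coprime
# with 36, so it is a valid key; small float rounding is handled by `to_int`):
#
#     hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     assert hc.decrypt(hc.encrypt("HELLOHOW")) == "HELLOHOW"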
| 293
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 499
|
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """simple docstring"""
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 421
| 0
|
def set_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """simple docstring"""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """simple docstring"""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
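# Usage sketch (hedged: names as fixed above, all checks verified by hand):
#
#     n = 0b1010
#     assert set_bit(n, 0) == 0b1011
#     assert clear_bit(n, 1) == 0b1000
#     assert flip_bit(n, 3) == 0b0010
#     assert is_bit_set(n, 1) is True
#     assert get_bit(n, 2) == 0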
| 57
|
def solution(n: int = 1000) -> int:
    """simple docstring"""
    result = 0
    a = 3
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            # note: a % 15 == 0 is already covered by a % 3 == 0
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
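# Worked check (hedged): solution(10) sums the multiples of 3 or 5 below 10,
# i.e. 3 + 5 + 6 + 9 = 23.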
| 57
| 1
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 298
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 535
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 478
|
import numpy as np
SQUARE = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
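# Usage sketch (hedged: any lowercase, space-free, j-free message round-trips
# through the 5x5 square above):
#
#     cipher = BifidCipher()
#     assert cipher.decode(cipher.encode("testmessage")) == "testmessage"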
| 478
| 1
|
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """simple docstring"""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
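# Worked example (hedged: illustrative numbers):
# values = [60, 100, 120], weights = [10, 20, 30], capacity w = 50, n = 3.
# Sorted by value/weight the ratios are 6, 5, 4; the first two items fit whole
# (weight 30) and 20/30 of the last is taken: 60 + 100 + 120 * 20 / 30 = 240.
#
#     print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0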
| 310
|
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """simple docstring"""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """simple docstring"""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """simple docstring"""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """simple docstring"""
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 310
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, max_patches=2048, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 713
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        """simple docstring"""
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """simple docstring"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """simple docstring"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 180
| 0
|
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 490
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 490
| 1
|
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
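# Editor's note (sketch): for the "squaredcos_cap_v2" schedule used below, this helper
# is called as betas_for_alpha_bar(1000) and returns a length-1000 float32 tensor of
# betas whose cumulative product of (1 - beta) follows the cosine alpha-bar curve above.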
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """
    Inverted scheduler for denoising diffusion implicit models (DDIM inversion).
    """

    order = 1
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input.
        """
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).
        """
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
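    # Worked example (editor addition): with num_train_timesteps=1000 and
    # set_timesteps(50), step_ratio is 20, so self.timesteps becomes
    # tensor([0, 20, 40, ..., 980]) (plus steps_offset) — ascending, because the
    # inverse scheduler walks from clean data toward noise.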
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the "previous" (here: next) step value
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
    def __len__(self):
        return self.config.num_train_timesteps
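# Minimal usage sketch (editor addition; `unet` and `latents` are assumed stand-ins,
# not defined in this file): an inverse DDIM loop maps a clean latent toward noise.
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample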
| 700
|
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of a given number n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
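# Editor's worked examples (the values are the well-known Project Euler results):
#   solution(13195)  # -> 29, since 13195 = 5 * 7 * 13 * 29
#   solution()       # -> 6857, the largest prime factor of 600851475143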
if __name__ == "__main__":
print(f'''{solution() = }''')
| 139
| 0
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 66
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True,
        )
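    # Editor's note (illustrative; the example values are assumptions): for one XNLI
    # row such as {"premise": "He is eating.", "hypothesis": "He is sleeping.", "label": 2},
    # preprocess_function returns the usual tokenizer dict, e.g.
    # {"input_ids": [...], "attention_mask": [...]} (plus token_type_ids for some models),
    # padded to max_seq_length when pad_to_max_length is set.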
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset",
            )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset",
            )
# Get the metric function
    metric = evaluate.load("xnli")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
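    # Editor's sketch (shapes assumed): with logits of shape (batch, 3) for
    # entailment/neutral/contradiction, np.argmax(..., axis=1) yields one class id
    # per example, which the XNLI metric compares against p.label_ids for accuracy.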
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
| 621
| 0
|
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n        \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric(\"mse\")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {\'mse\': 0.6123724356957945}\n\n    If you\'re using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 705
|
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    return sum(map(int, str(factorial(num))))
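# Editor's worked example: factorial(10) == 3628800, whose digits sum to
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) -> 27. For the Project Euler
# input, solution(100) -> 648.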
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 433
| 0
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
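# Editor's worked example: the first Fibonacci number with 3 digits is
# F(12) = 144, so solution(3) -> 12; for the Project Euler input,
# solution(1000) -> 4782.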
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 71
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMvaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
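    # Illustrative usage (editor sketch; the checkpoint name and variables are assumptions):
    #   processor = LayoutLMvaProcessor.from_pretrained("microsoft/layoutlmv3-base")
    #   encoding = processor(images=image, return_tensors="pt")
    # With apply_ocr=True on the image processor, words and boxes come from OCR;
    # otherwise pass `text` and `boxes` explicitly.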
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        """
        Forwards all arguments to the tokenizer's `batch_decode`.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        Forwards all arguments to the tokenizer's `decode`.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 537
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
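# Editor's note (sketch): with the _LazyModule registration above, a statement like
# `from transformers.models.xlm_roberta_xl import XLMRobertaXLModel` resolves the
# heavy torch-backed import only on first attribute access; the names listed in
# _import_structure are looked up lazily.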
| 307
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1_024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs) -> None:
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary, dropping the (large) mel filter banks.
        """
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation=None,
        padding=None,
        max_length=None,
        sampling_rate=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float32):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
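A minimal usage sketch for the feature extractor above, driving the __call__ path with a synthetic waveform; the "laion/clap-htsat-unfused" checkpoint name is an assumption (not taken from this file) and loading it requires network access.

import numpy as np
from transformers import ClapFeatureExtractor

feature_extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
waveform = np.random.randn(48_000).astype(np.float32)  # ~1 s of synthetic mono audio at 48 kHz
features = feature_extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape)  # stacked mel spectrograms per input
print(features["is_longer"])             # one [bool] flag per input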
| 307
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
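A self-contained toy version of the lazy-import pattern used above, to make the mechanics explicit; this sketch is illustrative only and is not the transformers _LazyModule implementation.

import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it,
        # e.g. "MobileNetV2Config" -> "configuration_mobilenet_v2"
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # the submodule is only imported on first attribute access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)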
| 657
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
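A pure-Python sketch of the [CLS]/[SEP] layout produced by the methods above; cls=101 and sep=102 are placeholder ids for illustration, not RemBERT's real special-token ids.

cls, sep = [101], [102]
token_ids_0, token_ids_1 = [7, 8, 9], [11, 12]
pair = cls + token_ids_0 + sep + token_ids_1 + sep
segment_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
assert pair == [101, 7, 8, 9, 102, 11, 12, 102]
assert segment_ids == [0, 0, 0, 0, 0, 1, 1, 1]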
| 568
| 0
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
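The same deprecation-shim pattern, sketched with stdlib-only classes so it can run without transformers; it checks that the warning fires on construction.

import warnings


class NewProcessor:
    pass


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldFeatureExtractor is deprecated, use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldFeatureExtractor()
assert caught[0].category is FutureWarning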
| 226
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
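The accent-stripping step from preprocess_text above, sketched in isolation with only the standard library:

import unicodedata

text = "Héllo  Wörld"
text = " ".join(text.strip().split())                        # remove_space collapses runs of whitespace
text = unicodedata.normalize("NFKD", text)                   # decompose accented characters
text = "".join(c for c in text if not unicodedata.combining(c))
assert text.lower() == "hello world"                         # do_lower_case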
| 226
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 503
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 348
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, examples):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
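A minimal interactive sketch of the pipeline exercised by the tests above; it uses the same tiny checkpoint the tests use, so the scores themselves are meaningless, and loading it requires network access.

from transformers import pipeline

classifier = pipeline(
    "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad"
)
result = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
print(result["labels"], result["scores"])  # scores sum to 1.0 in single-label mode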
| 109
|
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('doctest').testmod()
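A few doctest-style checks of the helpers above (pure Python, no dependencies):

assert to_pascal_case("hello world") == "HelloWorld"
assert to_camel_case("hello world") == "helloWorld"
assert to_snake_case("hello world", upper=False) == "hello_world"
assert to_kebab_case("hello world", upper=True) == "HELLO-WORLD"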
| 109
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
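How a (provider, options) tuple like gpu_provider above is consumed by ONNX Runtime directly; "model.onnx" is a placeholder path, not a real artifact from this file.

import onnxruntime as ort

provider = (
    "CUDAExecutionProvider",
    {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"},
)
sess_options = ort.SessionOptions()
sess_options.enable_mem_pattern = False
# session = ort.InferenceSession("model.onnx", sess_options=sess_options, providers=[provider])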
| 95
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
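A short sketch of instantiating the config and reading the derived encoder input size; the parameter values are illustrative, and the arithmetic assumes the defaults shown above.

from transformers import InformerConfig

config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
# feature_size = input_size * len(lags_sequence) + _number_of_features
#              = 1 * 7 + (0 + 0 + 2 + 0 + 1 * 2) = 11 with the defaults above
print(config.feature_size)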
| 95
| 1
|
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in ``s`` at which ``pattern`` starts (O(len(s) * len(pattern)))."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
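Two additional pure-Python checks of the scan above; its worst case is O(len(s) * len(pattern)), reached on long runs of a repeated character.

assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]
assert naive_pattern_search("ABC", "XYZ") == []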
| 656
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
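Both tests above rely on seeded generators for reproducibility; a small runnable sketch of the property they depend on, i.e. that the same seed yields identical random latents.

import torch

g1 = torch.Generator().manual_seed(0)
g2 = torch.Generator().manual_seed(0)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))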
| 656
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
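The fused-qkv split performed in convert_state_dict above, demonstrated on a toy tensor; only torch is required, and the shapes are illustrative.

import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
# rows [0:dim] are the query weights, [dim:2*dim] the key weights, [-dim:] the value weights
q, k, v = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)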
| 698
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class A__:
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.0_2 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> Tuple:
a_ : Union[str, Any] = parent
a_ : int = batch_size
a_ : int = seq_length
a_ : int = is_training
a_ : List[Any] = use_input_mask
a_ : str = use_token_type_ids
a_ : List[str] = use_labels
a_ : List[Any] = vocab_size
a_ : Dict = hidden_size
a_ : List[Any] = num_hidden_layers
a_ : str = num_attention_heads
a_ : str = intermediate_size
a_ : Optional[int] = hidden_act
a_ : Optional[int] = hidden_dropout_prob
a_ : int = attention_probs_dropout_prob
a_ : List[str] = max_position_embeddings
a_ : Optional[Any] = type_vocab_size
a_ : Any = type_sequence_label_size
a_ : List[Any] = initializer_range
a_ : List[str] = num_labels
a_ : Union[str, Any] = num_choices
a_ : Dict = scope
def UpperCamelCase__ ( self ) -> Any:
a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Union[str, Any] = None
if self.use_input_mask:
a_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
a_ : List[Any] = None
if self.use_token_type_ids:
a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : Union[str, Any] = None
a_ : Dict = None
a_ : Any = None
if self.use_labels:
a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : int = ids_tensor([self.batch_size] , self.num_choices )
a_ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) -> Any:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , use_stable_embedding=_lowercase , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 540
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
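
# Optional-dependency guard: if torch or transformers is missing, fall back to
# dummy placeholder objects so that importing the package itself never fails.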
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 704
|
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
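
# Only the standalone TrOCR decoder is exercised here; the full encoder-decoder
# model is covered by the VisionEncoderDecoder integration tests.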
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 623
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor
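
# MobileViT mixes MobileNet-style convolutions with transformer blocks and has
# no text modality, which is why several of the common-test skips below exist.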
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 98
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
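
# encode_np_array() below downcasts any other dtype to the nearest entry of this
# list (within the same integer/float kind) before handing the array to Pillow.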
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self : Any ) -> str:
'''simple docstring'''
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using its native compression if possible, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 98
| 1
|
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
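
# Both estimators in this module rest on the same identity: for U uniform on
# [a, b], E[f(U)] * (b - a) equals the integral of f over [a, b], so a sample
# mean of f at uniform draws converges to the integral.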
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 397
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 397
| 1
|
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 288
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(CodeGenOnnxConfig, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 288
| 1
|
"""simple docstring"""
from PIL import Image
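
# change_contrast() below implements the standard linear contrast adjustment:
# factor = 259*(level + 255) / (255*(259 - level)), with every channel value c
# remapped to 128 + factor*(c - 128), so that mid-grey (128) stays fixed.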
def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of `img` with its contrast changed by `level`."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 706
|
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
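
# Dijkstra's two-stack algorithm evaluates a fully parenthesised infix expression:
# operands and operators are pushed onto separate stacks, and every ')' pops one
# operator plus two operands, applies the operator, and pushes the result back.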
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised infix arithmetic expression."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 20
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
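
# CustomImageProcessor comes from the local test_module fixture and is only used
# by the dynamic-module push-to-hub test at the bottom of this file.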
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : int = ViTImageProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case : Tuple = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id="""test-image-processor""" , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
snake_case : List[Any] = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token)
            new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"})
        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 587
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = "\\n@inproceedings{snover-etal-2006-study,\n    title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n    author = \"Snover, Matthew  and\n      Dorr, Bonnie  and\n      Schwartz, Rich  and\n      Micciulla, Linnea  and\n      Makhoul, John\",\n    booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n    month = aug # \" 8-12\",\n    year = \"2006\",\n    address = \"Cambridge, Massachusetts, USA\",\n    publisher = \"Association for Machine Translation in the Americas\",\n    url = \"https://aclanthology.org/2006.amta-papers.25\",\n    pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n    title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n    author = \"Post, Matt\",\n    booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n    month = oct,\n    year = \"2018\",\n    address = \"Belgium, Brussels\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/W18-6319\",\n    pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n    'num_edits' (int): The cumulative number of edits\n    'ref_length' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = [\"does this sentence match??\",\n        ...                \"what about this sentence?\",\n        ...                \"What did the TER metric user say to the developer?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...               [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n        ...               [\"Your jokes are...\", \"...TERrible\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n    Example 2:\n        >>> predictions = [\"does this sentence match??\",\n        ...                \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...               [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n    Example 3:\n        >>> predictions = [\"does this sentence match??\",\n        ...                \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...               [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       normalized=True,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n    Example 4:\n        >>> predictions = [\"does this sentence match??\",\n        ...                \"what about this sentence?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...               [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       ignore_punct=True,\n        ...                       case_sensitive=False)\n        >>> print(results)\n        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n    Example 5:\n        >>> predictions = [\"does this sentence match??\",\n        ...                \"what about this sentence?\",\n        ...                \"What did the TER metric user say to the developer?\"]\n        >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n        ...               [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n        ...               [\"Your jokes are...\", \"...TERrible\"]]\n        >>> ter = datasets.load_metric(\"ter\")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       ignore_punct=True,\n        ...                       case_sensitive=False)\n        >>> print(results)\n        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute(self, predictions, references, normalized=False, ignore_punct=False, support_zh_ja_chars=False, case_sensitive=False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive)
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 284
| 0
|
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    # Compute nCr by filling one row of Pascal's triangle in place.
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row, sweep right to left.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
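# Quick sanity check of the DP routine above; math.comb (Python >= 3.8) is an
# independent reference implementation, added here purely as an illustration.
import math
assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252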
| 700
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
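# Note (illustrative, assuming the standard diffusers dummy-object pattern):
# DummyObject routes instantiation and the classmethods above through
# requires_backends, so touching the class without the `onnx` backend raises
# a clear ImportError instead of an opaque AttributeError, e.g.:
# model = OnnxRuntimeModel()  # -> ImportError unless onnxruntime is installed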
| 490
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1)
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase_ = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
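# Hedged standalone sketch of the same checkpoint outside the test harness
# (left as comments: it needs TF, Hub network access, and a slow download;
# the input string below is an illustrative placeholder).
# tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
# model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
# batch = tokenizer(["Some long article text ..."], padding=True, return_tensors="tf")
# summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
# print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))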
| 663
|
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    # K-Means clustering written against the TensorFlow 1.x graph API
    # (tf.placeholder / tf.Session); under TF 2.x this needs
    # `import tensorflow.compat.v1 as tf` plus `tf.disable_v2_behavior()`.
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
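# Minimal usage sketch (the data is made up for illustration; the TF 1.x
# caveat in the function's leading comment applies):
# vectors = array([[1.0, 1.0], [1.2, 0.8], [8.0, 8.1], [7.9, 8.3]])
# centroids, assignments = tf_k_means_cluster(vectors, noofclusters=2)
# print(centroids, assignments)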
| 663
| 1
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)
    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))
    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
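# Hedged usage sketch of these criteria outside the tests (model/input_ids are
# illustrative placeholders, not part of this test file): a StoppingCriteriaList
# can be passed straight to `generate`.
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
# output_ids = model.generate(input_ids, stopping_criteria=criteria)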
| 709
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""")
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_lowerCAmelCase = Accelerator()
_lowerCAmelCase = (accelerator.state.process_index + 2, 1_0)
_lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device)
_lowerCAmelCase = """"""
_lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
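# To exercise the __main__ block by hand (assumes at least 2 visible GPUs and
# that accelerate/torch are installed; the file path is illustrative):
#   torchrun --nproc_per_node=2 test_multigpu.py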
| 16
| 0
|
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    # Probabilistic Miller-Rabin primality test with 5 random witnesses.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    # Keep drawing random keysize-bit numbers until one passes the primality checks.
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print(('''Prime number:''', num))
    print(('''is_prime_low_num:''', is_prime_low_num(num)))
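    # Illustrative spot-check (not part of the original script): 97 is in the
    # low-primes table and 100 is caught by trial division, so both fast paths
    # are exercised cheaply.
    assert is_prime_low_num(97) and not is_prime_low_num(100)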
| 502
|
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 502
| 1
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session')
def dataset():
    n = 10
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string')),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'])),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string'),
                    'answer_start': datasets.Value('int32'),
                }),
            'id': datasets.Value('int64'),
        })
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n)),
        }, features=features)
    return dataset
@pytest.fixture(scope='session')
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp('data') / 'file.arrow')
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
snake_case__ : List[Any] = """\
Text data.
Second line of data."""
@pytest.fixture(scope='session')
def _snake_case (__lowercase):
UpperCamelCase_ = tmp_path_factory.mktemp('data') / 'file.txt'
UpperCamelCase_ = FILE_CONTENT
with open(__lowercase , 'w') as f:
f.write(__lowercase)
return filename
@pytest.fixture(scope='session')
def bz2_file(tmp_path_factory):
    import bz2
    path = tmp_path_factory.mktemp('data') / 'file.txt.bz2'
    data = bytes(FILE_CONTENT, 'utf-8')
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path
@pytest.fixture(scope='session')
def gz_file(tmp_path_factory):
    import gzip
    path = str(tmp_path_factory.mktemp('data') / 'file.txt.gz')
    data = bytes(FILE_CONTENT, 'utf-8')
    with gzip.open(path, 'wb') as f:
        f.write(data)
    return path
@pytest.fixture(scope='session')
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        path = tmp_path_factory.mktemp('data') / 'file.txt.lz4'
        data = bytes(FILE_CONTENT, 'utf-8')
        with lz4.frame.open(path, 'wb') as f:
            f.write(data)
        return path
@pytest.fixture(scope='session')
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        path = tmp_path_factory.mktemp('data') / 'file.txt.7z'
        with py7zr.SevenZipFile(path, 'w') as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope='session')
def tar_file(tmp_path_factory, text_file):
    import tarfile
    path = tmp_path_factory.mktemp('data') / 'file.txt.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope='session')
def xz_file(tmp_path_factory):
    import lzma
    path = tmp_path_factory.mktemp('data') / 'file.txt.xz'
    data = bytes(FILE_CONTENT, 'utf-8')
    with lzma.open(path, 'wb') as f:
        f.write(data)
    return path
@pytest.fixture(scope='session')
def zip_file(tmp_path_factory, text_file):
    import zipfile
    path = tmp_path_factory.mktemp('data') / 'file.txt.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope='session')
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
        path = tmp_path_factory.mktemp('data') / 'file.txt.zst'
        data = bytes(FILE_CONTENT, 'utf-8')
        with zstd.open(path, 'wb') as f:
            f.write(data)
        return path
@pytest.fixture(scope='session')
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.xml'
    data = textwrap.dedent(
        '\\n    <?xml version="1.0" encoding="UTF-8" ?>\n    <tmx version="1.4">\n  <header segtype="sentence" srclang="ca" />\n  <body>\n    <tu>\n      <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n      <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n    </tu>\n    <tu>\n      <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n      <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n    </tu>\n    <tu>\n      <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n      <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n    </tu>\n    <tu>\n      <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n      <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n    </tu>\n    <tu>\n      <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n      <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n    </tu>\n  </body>\n</tmx>')
    with open(filename, 'w') as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session')
def dataset_dict():
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session')
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp('data') / 'dataset.arrow')
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope='session')
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.sqlite')
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)')
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)', tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope='session')
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope='session')
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope='session')
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.bz2'
    with open(csv_path, 'rb') as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path
@pytest.fixture(scope='session')
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path
@pytest.fixture(scope='session')
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace('.csv', '.CSV')))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace('.csv', '.CSV')))
    return path
@pytest.fixture(scope='session')
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.join('main_dir', os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join('main_dir', os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope='session')
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.parquet')
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        })
    with open(path, 'wb') as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope='session')
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope='session')
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope='session')
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset_312.jsonl')
    with open(path, 'w') as f:
        for item in DATA_312:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset-str.jsonl')
    with open(path, 'w') as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def text_gz_path(tmp_path_factory, text_path):
    import gzip
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt.gz')
    with open(text_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope='session')
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl.gz')
    with open(jsonl_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope='session')
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope='session')
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(zip_jsonl_path, arcname=os.path.join('nested', os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope='session')
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.join('main_dir', os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join('main_dir', os.path.basename(jsonl2_path)))
    return path
@pytest.fixture(scope='session')
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope='session')
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(tar_jsonl_path, arcname=os.path.join('nested', os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope='session')
def text_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def text2_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def abc_file(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data') / 'dataset.abc'
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path
@pytest.fixture(scope='session')
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.join('main_dir', os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join('main_dir', os.path.basename(text2_path)))
    return path
@pytest.fixture(scope='session')
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.ext.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename('unsupported.ext'))
        f.write(text2_path, arcname=os.path.basename('unsupported_2.ext'))
    return path
@pytest.fixture(scope='session')
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
    path = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(text)
    return path
@pytest.fixture(scope='session')
def image_file():
    return os.path.join('tests', 'features', 'data', 'test_image_rgb.jpg')
@pytest.fixture(scope='session')
def audio_file():
    return os.path.join('tests', 'features', 'data', 'test_audio_44100.wav')
@pytest.fixture(scope='session')
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace('.jpg', '2.jpg'))
    return path
@pytest.fixture(scope='session')
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp('data_dir')
    (data_dir / "subdir").mkdir()
    with open(data_dir / 'subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / 'subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '.subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / '.subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    return data_dir
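# Hedged usage note: pytest injects these fixtures into tests by parameter
# name, so a consumer elsewhere in the suite looks like the (illustrative)
# sketch below.
# def test_csv_file_exists(csv_path):
#     assert os.path.isfile(csv_path)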
| 703
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path', ['paws', 'csv'])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.parametrize('path', ['accuracy'])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    'path, config_name, expected_splits', [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ])
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception', [
        ('paws', None, ValueError),
    ])
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    'path, expected', [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ])
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config', [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ])
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits', [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ])
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception', [
        ('paws', None, ValueError),
    ])
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : List[Any] , _lowercase : List[str] , _lowercase : Optional[Any] ):
super().__init__()
self.register_modules(unet=_lowercase , scheduler=_lowercase )
@torch.no_grad()
def __call__( self : Any , _lowercase : int = 1 , _lowercase : int = 1_00 , _lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase : Optional[float] = None , _lowercase : bool = True , ):
if audio_length_in_s is None:
__UpperCAmelCase = self.unet.config.sample_size / self.unet.config.sample_rate
__UpperCAmelCase = audio_length_in_s * self.unet.config.sample_rate
__UpperCAmelCase = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
__UpperCAmelCase = int(_lowercase )
if sample_size % down_scale_factor != 0:
__UpperCAmelCase = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
''' process.''' )
__UpperCAmelCase = int(_lowercase )
__UpperCAmelCase = next(iter(self.unet.parameters() ) ).dtype
__UpperCAmelCase = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__UpperCAmelCase = randn_tensor(_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
# set step values
self.scheduler.set_timesteps(_lowercase , device=audio.device )
__UpperCAmelCase = self.scheduler.timesteps.to(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__UpperCAmelCase = self.unet(_lowercase , _lowercase ).sample
# 2. compute previous image: x_t -> t_t-1
__UpperCAmelCase = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
__UpperCAmelCase = audio.clamp(-1 , 1 ).float().cpu().numpy()
__UpperCAmelCase = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_lowercase )
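# Hedged usage sketch for the unconditional audio pipeline above. The
# checkpoint id is an assumption for illustration, not taken from this file:
#
# pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# output = pipe(batch_size=1, num_inference_steps=100)
# waveform = output.audios[0]  # numpy array, trimmed to the requested length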
| 49
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__: Optional[Any] = logging.get_logger(__name__)
a__: Any = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = '''conditional_detr'''
__SCREAMING_SNAKE_CASE = ['''past_key_values''']
__SCREAMING_SNAKE_CASE = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self,__lowerCamelCase=True,__lowerCamelCase=None,__lowerCamelCase=3,__lowerCamelCase=300,__lowerCamelCase=6,__lowerCamelCase=2048,__lowerCamelCase=8,__lowerCamelCase=6,__lowerCamelCase=2048,__lowerCamelCase=8,__lowerCamelCase=0.0,__lowerCamelCase=0.0,__lowerCamelCase=True,__lowerCamelCase="relu",__lowerCamelCase=256,__lowerCamelCase=0.1,__lowerCamelCase=0.0,__lowerCamelCase=0.0,__lowerCamelCase=0.02,__lowerCamelCase=1.0,__lowerCamelCase=False,__lowerCamelCase="sine",__lowerCamelCase="resnet50",__lowerCamelCase=True,__lowerCamelCase=False,__lowerCamelCase=2,__lowerCamelCase=5,__lowerCamelCase=2,__lowerCamelCase=1,__lowerCamelCase=1,__lowerCamelCase=2,__lowerCamelCase=5,__lowerCamelCase=2,__lowerCamelCase=0.25,**__lowerCamelCase,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__lowerCamelCase,__lowerCamelCase ):
A__ = backbone_config.get('''model_type''' )
A__ = CONFIG_MAPPING[backbone_model_type]
A__ = config_class.from_dict(__lowerCamelCase )
A__ = use_timm_backbone
A__ = backbone_config
A__ = num_channels
A__ = num_queries
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = init_xavier_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = encoder_layers
A__ = auxiliary_loss
A__ = position_embedding_type
A__ = backbone
A__ = use_pretrained_backbone
A__ = dilation
# Hungarian matcher
A__ = class_cost
A__ = bbox_cost
A__ = giou_cost
# Loss coefficients
A__ = mask_loss_coefficient
A__ = dice_loss_coefficient
A__ = cls_loss_coefficient
A__ = bbox_loss_coefficient
A__ = giou_loss_coefficient
A__ = focal_alpha
super().__init__(is_encoder_decoder=__lowerCamelCase,**__lowerCamelCase )
@property
def UpperCamelCase ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase ( self ):
return self.d_model
def UpperCamelCase ( self ):
A__ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ = self.backbone_config.to_dict()
A__ = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = version.parse('''1.11''' )
@property
def UpperCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def UpperCamelCase ( self ):
return 1E-5
@property
def UpperCamelCase ( self ):
return 12
| 190
| 0
|
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def a ( UpperCamelCase_ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
snake_case__ =f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(UpperCamelCase_ )
snake_case__ =''.join(bin(UpperCamelCase_ )[2:].zfill(8 ) for byte in data )
snake_case__ =len(UpperCamelCase_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
snake_case__ =b'=' * ((6 - len(UpperCamelCase_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(UpperCamelCase_ ) % 6)
else:
snake_case__ =b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCamelCase_ ) , 6 ) ).encode()
+ padding
)
def a ( UpperCamelCase_ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) and not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
snake_case__ =(
'argument should be a bytes-like object or ASCII string, '
f"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(UpperCamelCase_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
try:
snake_case__ =encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
snake_case__ =encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCamelCase_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
snake_case__ =encoded_data[:-padding]
snake_case__ =''.join(
bin(B64_CHARSET.index(UpperCamelCase_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
snake_case__ =''.join(
bin(B64_CHARSET.index(UpperCamelCase_ ) )[2:].zfill(6 ) for char in encoded_data )
snake_case__ =[
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCamelCase_ ) , 8 )
]
return bytes(UpperCamelCase_ )
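# Worked example of the 6-bit regrouping implemented above, cross-checked
# against the standard library (`_demo_base64` is a hypothetical helper, not
# part of this module): b"hi" is 01101000 01101001; regrouped into sixes it is
# 011010 000110 1001 + "00" padding, i.e. indices 26, 6, 36 -> "aGk" plus "=".
def _demo_base64() -> None:
    import base64

    assert base64.b64encode(b"hi") == b"aGk="
    assert base64.b64decode(b"aGk=") == b"hi"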
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
'''simple docstring'''
from __future__ import annotations
def a ( UpperCamelCase_ : list[float] , UpperCamelCase_ : list[float] ) -> float:
snake_case__ =sorted(numsa + numsa )
snake_case__ , snake_case__ =divmod(len(UpperCamelCase_ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [float(x) for x in input('''Enter the elements of first array: ''').split()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [float(x) for x in input('''Enter the elements of second array: ''').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 581
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __magic_name__ ( lowercase__ ):
_SCREAMING_SNAKE_CASE : int = ['pixel_values']
def __init__( self : Optional[Any] , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : bool = True , snake_case_ : Union[int, float] = 1 / 255 , snake_case_ : bool = True , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : bool = True , **snake_case_ : Optional[int] , ):
super().__init__(**snake_case_ )
__snake_case = size if size is not None else {"height": 384, "width": 384}
__snake_case = get_size_dict(snake_case_ , default_to_square=snake_case_ )
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
__snake_case = do_convert_rgb
def lowerCAmelCase ( self : Optional[int] , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Dict , ):
__snake_case = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
__snake_case = (size["height"], size["width"])
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase ( self : List[str] , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : int , ):
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : np.ndarray , snake_case_ : Union[float, List[float]] , snake_case_ : Union[float, List[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : str , ):
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase ( self : Dict , snake_case_ : ImageInput , snake_case_ : Optional[bool] = None , snake_case_ : Optional[Dict[str, int]] = None , snake_case_ : PILImageResampling = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[float] = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : bool = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : Any , ):
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = resample if resample is not None else self.resample
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(snake_case_ , default_to_square=snake_case_ )
__snake_case = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__snake_case = [convert_to_rgb(snake_case_ ) for image in images]
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
__snake_case = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
__snake_case = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
__snake_case = BatchFeature(data={"pixel_values": images} , tensor_type=snake_case_ )
return encoded_outputs
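# Hedged usage sketch of the image processor defined above (the class name is
# mangled in this file; `pil_image` is a placeholder input). With the default
# size of 384x384:
#
# processor = <processor class>()
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape  # -> (1, 3, 384, 384)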
| 163
|
"""simple docstring"""
from __future__ import annotations
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
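# Worked example of the zero-slot convention above: exactly one of (stress,
# tangential_force, area) must be 0 and names the unknown. With stress=0,
# tangential_force=100 N and area=20 m^2 the function returns
# ("stress", 100 / 20) == ("stress", 5.0); with tangential_force=0 it returns
# ("tangential_force", stress * area) instead.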
| 163
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase: Union[str, Any] = logging.get_logger(__name__)
_lowercase: Tuple = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class lowerCamelCase__ ( UpperCAmelCase ):
UpperCamelCase__ ="open-llama"
def __init__( self : List[Any] , lowercase__ : Optional[int]=10_00_00 , lowercase__ : Optional[int]=40_96 , lowercase__ : List[str]=1_10_08 , lowercase__ : Optional[Any]=32 , lowercase__ : Optional[int]=32 , lowercase__ : Dict="silu" , lowercase__ : List[Any]=20_48 , lowercase__ : int=0.0_2 , lowercase__ : Optional[int]=1e-6 , lowercase__ : Optional[Any]=True , lowercase__ : Optional[Any]=0 , lowercase__ : Any=1 , lowercase__ : List[Any]=2 , lowercase__ : Dict=False , lowercase__ : List[str]=True , lowercase__ : Dict=0.1 , lowercase__ : int=0.1 , lowercase__ : Union[str, Any]=True , lowercase__ : Tuple=True , lowercase__ : int=None , **lowercase__ : List[str] , ):
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = hidden_size
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = initializer_range
_lowerCAmelCase = rms_norm_eps
_lowerCAmelCase = use_cache
_lowerCAmelCase = kwargs.pop(
'use_memorry_efficient_attention' , lowercase__ )
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_dropout_prob
_lowerCAmelCase = use_stable_embedding
_lowerCAmelCase = shared_input_output_embedding
_lowerCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , tie_word_embeddings=lowercase__ , **lowercase__ , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'got {self.rope_scaling}' )
_lowerCAmelCase = self.rope_scaling.get('type' , lowercase__ )
_lowerCAmelCase = self.rope_scaling.get('factor' , lowercase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(lowercase__ , lowercase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
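# Example dicts against the validator above: {"type": "linear", "factor": 2.0}
# passes, while {"type": "dynamic", "factor": 1.0} raises because the factor
# must be a float strictly greater than 1.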
| 225
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase__ :
def __init__( self : Optional[Any] , lowercase__ : str , lowercase__ : Dict=13 , lowercase__ : Tuple=30 , lowercase__ : Optional[int]=2 , lowercase__ : Tuple=3 , lowercase__ : Dict=True , lowercase__ : Any=True , lowercase__ : int=32 , lowercase__ : int=5 , lowercase__ : Union[str, Any]=4 , lowercase__ : Tuple=37 , lowercase__ : Dict="gelu" , lowercase__ : int=0.1 , lowercase__ : Dict=0.1 , lowercase__ : Any=10 , lowercase__ : Tuple=0.0_2 , lowercase__ : str=3 , lowercase__ : Tuple=0.6 , lowercase__ : Tuple=None , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = mask_ratio
_lowerCAmelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : int ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE__ ( self : Dict , lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : Dict ):
_lowerCAmelCase = ViTMAEModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : str , lowercase__ : List[Any] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ):
_lowerCAmelCase = ViTMAEForPreTraining(lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = model(lowercase__ )
_lowerCAmelCase = (self.image_size // self.patch_size) ** 2
_lowerCAmelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = ViTMAEForPreTraining(lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(lowercase__ )
_lowerCAmelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
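# Worked example of the masked sequence length computed in __init__ above:
# image_size=30 and patch_size=2 give (30 // 2) ** 2 = 225 patches; with
# mask_ratio=0.6 the expected length is ceil(0.4 * (225 + 1)) = 91 tokens,
# [CLS] included.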
@require_torch
class lowerCamelCase__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ =(ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
UpperCamelCase__ ={"feature-extraction": ViTMAEModel} if is_torch_available() else {}
UpperCamelCase__ =False
UpperCamelCase__ =False
UpperCamelCase__ =False
UpperCamelCase__ =False
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = ViTMAEModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self : int ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(lowercase__ )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : int , lowercase__ : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
_lowerCAmelCase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCAmelCase = torch.from_numpy(lowercase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCAmelCase = pt_noise
super().check_pt_tf_models(lowercase__ , lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
_lowerCAmelCase = outputs[0].cpu().numpy()
_lowerCAmelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase__ )
_lowerCAmelCase = model_class.from_pretrained(lowercase__ )
model.to(lowercase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
# Make sure we don't have nans
_lowerCAmelCase = after_outputs[0].cpu().numpy()
_lowerCAmelCase = 0
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase__ , 1e-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = ViTMAEModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def _lowerCamelCase ( ):
_lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCAmelCase = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(lowercase__ )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=lowercase__ , return_tensors='pt' ).to(lowercase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCAmelCase = ViTMAEConfig()
_lowerCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**lowercase__ , noise=torch.from_numpy(lowercase__ ).to(device=lowercase__ ) )
# verify the logits
_lowerCAmelCase = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , lowercase__ )
_lowerCAmelCase = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowercase__ ) , atol=1e-4 ) )
| 225
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase = {
'''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
'''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''M2M100ForConditionalGeneration''',
'''M2M100Model''',
'''M2M100PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
UpperCAmelCase__ : Optional[int] = False
@skip_mps
class UpperCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ = StableDiffusionAttendAndExcitePipeline
UpperCamelCase_ = False
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCAmelCase__ ( cls) -> str:
super().setUpClass()
torch.use_deterministic_algorithms(UpperCamelCase)
@classmethod
def lowerCAmelCase__ ( cls) -> Tuple:
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCamelCase)
def lowerCAmelCase__ ( self) -> Any:
torch.manual_seed(0)
UpperCamelCase__ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase , )
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase , set_alpha_to_one=UpperCamelCase , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0)
UpperCamelCase__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
UpperCamelCase__ : List[Any] = CLIPTextModel(UpperCamelCase)
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
UpperCamelCase__ : Tuple = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCAmelCase__ ( self , UpperCamelCase , UpperCamelCase=0) -> str:
if str(UpperCamelCase).startswith('mps'):
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCamelCase)
else:
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCamelCase).manual_seed(UpperCamelCase)
UpperCamelCase__ : List[Any] = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def lowerCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : Union[str, Any] = self.get_dummy_components()
UpperCamelCase__ : Optional[Any] = self.pipeline_class(**UpperCamelCase)
pipe.to(UpperCamelCase)
pipe.set_progress_bar_config(disable=UpperCamelCase)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCamelCase)
UpperCamelCase__ : Union[str, Any] = pipe(**UpperCamelCase).images
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3))
UpperCamelCase__ : str = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496])
UpperCamelCase__ : List[str] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCamelCase , 1E-3)
def lowerCAmelCase__ ( self) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4)
def lowerCAmelCase__ ( self) -> str:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def lowerCAmelCase__ ( self) -> List[str]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4)
def lowerCAmelCase__ ( self) -> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
def lowerCAmelCase__ ( self) -> Optional[Any]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4)
def lowerCAmelCase__ ( self) -> Dict:
super().test_save_load_local(expected_max_difference=5E-4)
def lowerCAmelCase__ ( self) -> Dict:
super().test_save_load_optional_components(expected_max_difference=4E-4)
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase__ ( cls) -> Optional[int]:
super().setUpClass()
torch.use_deterministic_algorithms(UpperCamelCase)
@classmethod
def lowerCAmelCase__ ( cls) -> str:
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCamelCase)
def lowerCAmelCase__ ( self) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self) -> int:
UpperCamelCase__ : Dict = torch.manual_seed(51)
UpperCamelCase__ : Dict = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=UpperCamelCase , torch_dtype=torch.floataa)
pipe.to('cuda')
UpperCamelCase__ : Optional[int] = 'a painting of an elephant with glasses'
UpperCamelCase__ : Dict = [5, 7]
UpperCamelCase__ : Optional[int] = pipe(
prompt=UpperCamelCase , token_indices=UpperCamelCase , guidance_scale=7.5 , generator=UpperCamelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
UpperCamelCase__ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
assert np.abs((expected_image - image).max()) < 5E-1
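# Hedged usage sketch of Attend-and-Excite, mirroring the slow test above
# (prompt and token indices are taken from that test; outputs are stochastic):
#
# pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
# ).to("cuda")
# image = pipe(
#     prompt="a painting of an elephant with glasses", token_indices=[5, 7],
#     guidance_scale=7.5, num_inference_steps=5, max_iter_to_alter=5,
# ).images[0]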
| 410
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase :
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=64 , UpperCamelCase=2 , UpperCamelCase=3 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=10 , UpperCamelCase=0.02 , UpperCamelCase=[1, 16, 4, 4] , UpperCamelCase=None , ):
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = scope
_SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
_SCREAMING_SNAKE_CASE = num_patches + 1
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCamelCase_ , )
def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = ViTHybridModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = self.type_sequence_label_size
_SCREAMING_SNAKE_CASE = ViTHybridForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
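# Worked example of the backbone-based patch count above: with image_size=64
# and the backbone's effective output stride of 32, the feature map is 2x2,
# so (64 // 32) ** 2 = 4 patches and seq_length = 5 once the [CLS] token is
# added.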
@require_torch
class lowerCAmelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
a : Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
a : List[str] = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
a : Union[str, Any] = False
a : List[str] = False
a : Tuple = False
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = ViTHybridModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase ( self ):
pass
def lowercase ( self ):
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = _config_zero_init(UpperCamelCase_ )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(config=UpperCamelCase_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_SCREAMING_SNAKE_CASE = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def lowercase ( self ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = ViTHybridModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
_SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowercase ( self ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCamelCase_ )
_SCREAMING_SNAKE_CASE = self.default_image_processor
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ )
# verify the logits
_SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) )
@slow
@require_accelerate
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
_SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=UpperCamelCase_ , return_tensors="pt" )
_SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ )
_SCREAMING_SNAKE_CASE = outputs.logits
# model predicts one of the 1000 ImageNet classes
_SCREAMING_SNAKE_CASE = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
| 714
|
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _a ( _SCREAMING_SNAKE_CASE : int ):
# A local function to see if a dot lands in the circle.
def is_in_circle(_SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool:
_SCREAMING_SNAKE_CASE = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_SCREAMING_SNAKE_CASE = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_SCREAMING_SNAKE_CASE ) )
# The ratio of the area for circle to square is pi/4.
_SCREAMING_SNAKE_CASE = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def _a ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Callable[[float], float] , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 , ):
return mean(
function_to_integrate(uniform(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for _ in range(_SCREAMING_SNAKE_CASE ) ) * (max_value - min_value)
def _a ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 ):
def identity_function(_SCREAMING_SNAKE_CASE : float ) -> float:
return x
_SCREAMING_SNAKE_CASE = area_under_curve_estimator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("******************" )
def _a ( _SCREAMING_SNAKE_CASE : int ):
def function_to_integrate(_SCREAMING_SNAKE_CASE : float ) -> float:
return sqrt(4.0 - x * x )
_SCREAMING_SNAKE_CASE = area_under_curve_estimator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 493
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : List[Any] = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
_snake_case : List[Any] = {'mobilebert-uncased': 512}
_snake_case : Optional[int] = {}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = MobileBertTokenizer
def __init__( self : Any , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Tuple="[UNK]" , lowerCAmelCase_ : Optional[int]="[SEP]" , lowerCAmelCase_ : Optional[int]="[PAD]" , lowerCAmelCase_ : Tuple="[CLS]" , lowerCAmelCase_ : List[Any]="[MASK]" , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(lowerCAmelCase_ , normalizer_state.pop('type' ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**lowerCAmelCase_ )
__lowerCAmelCase = do_lower_case
def lowercase ( self : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any=None ) -> List[str]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
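# Sketch of the segment ids produced above for a sentence pair (A, B):
# [CLS] A [SEP] maps to 0s and B [SEP] maps to 1s, the standard BERT-style
# token_type_ids layout; a single sentence yields only 0s.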
| 53
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCamelCase ( __lowerCamelCase : Namespace ):
return TrainCommand(__lowerCamelCase )
class UpperCAmelCase ( A_ ):
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : ArgumentParser ) -> int:
'''simple docstring'''
snake_case : Any = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=snake_case__ , required=snake_case__ , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=snake_case__ , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=snake_case__ , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=snake_case__ , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=snake_case__ , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=snake_case__ , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=snake_case__ , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=snake_case__ , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=snake_case__ , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=snake_case__ , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=snake_case__ , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=snake_case__ , default=3e-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=snake_case__ , default=1e-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=snake_case__ )
def __init__(self : List[Any] , snake_case__ : Namespace ) -> Tuple:
'''simple docstring'''
snake_case : Any = logging.get_logger("transformers-cli/training" )
snake_case : List[Any] = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=snake_case__ )
snake_case : Any = args.output
snake_case : List[Any] = args.column_label
snake_case : Tuple = args.column_text
snake_case : str = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
snake_case : Optional[int] = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
snake_case : Tuple = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
snake_case : Optional[Any] = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
snake_case : Dict = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
snake_case : Union[str, Any] = args.validation_split
snake_case : Optional[Any] = args.train_batch_size
snake_case : List[str] = args.valid_batch_size
snake_case : List[Any] = args.learning_rate
snake_case : List[Any] = args.adam_epsilon
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Tuple:
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
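# Hedged command-line sketch matching the arguments registered above (file
# and directory names are placeholders):
#
# transformers-cli train --train_data ./train.csv --column_label 0 \
#     --column_text 1 --model bert-base-uncased --output ./trained_model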
| 204
| 0
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    """Support vector classifier, trained by solving Wolfe's dual problem."""

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
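    # Usage sketch (an illustrative addition, not part of the original module):
    # train the SVC defined above on four linearly separable points and classify
    # a new one. The toy data below is an assumption made for demonstration.
    xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
    ys = np.asarray([1, 1, -1, -1])
    svc = SVC(kernel="linear")
    svc.fit(xs, ys)
    print(svc.predict(np.asarray([0.0, 1.5])))  # expected: 1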
| 326
|
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.602176634e-19,
"britishthermalunit_it": 1055.05585,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
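    # Minimal usage sketch (illustrative addition): convert 1 kilowatthour to
    # joules using the table above.
    print(energy_conversion("kilowatthour", "joule", 1))  # 3600000.0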
| 326
| 1
|
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    # numerically stable softmax over the last axis
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    """Classifies a text, optionally paired with a second text."""

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 70
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 70
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 437
|
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Find the missing carrier concentration; exactly one argument must be 0."""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
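    # Worked example (illustrative addition): by the mass action law n * p = n_i**2,
    # with n = 25 and n_i = 10 the hole concentration is 10**2 / 25 = 4.
    print(carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10))
    # -> ('hole_conc', 4.0)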
| 437
| 1
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def setUp(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 143
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 143
| 1
|
def mean_absolute_deviation(nums):
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
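    # Quick check (illustrative addition): for [1, 2, 3, 4] the mean is 2.5, so the
    # mean absolute deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.
    print(mean_absolute_deviation([1, 2, 3, 4]))  # 1.0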
| 633
|
import os
def UpperCamelCase ( lowerCAmelCase__ = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
lowercase = [
[int(lowerCAmelCase__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase = len(lowerCAmelCase__ )
lowercase = len(matrix[0] )
lowercase = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
lowercase = matrix[i][0]
for j in range(1 , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
lowercase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase__ ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'{solution() = }')
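    # Worked example (illustrative addition, independent of the input file): for
    # matrix = [[1, 2, 3], [9, 9, 9]] the column sweep above yields best sums
    # [1, 9] after column 0, [3, 12] after column 1 and [6, 15] after column 2,
    # so the minimal left-to-right path sum is 6 (the path 1 -> 2 -> 3).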
| 633
| 1
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f'''{solution() = }''')
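    # Sanity check (illustrative addition): 145 is one of the numbers counted by
    # solution(), since 1! + 4! + 5! = 1 + 24 + 120 = 145.
    assert sum_of_digit_factorial(145) == 145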
| 131
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 519
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 712
|
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase : str = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 93
| 0
|
def aliquot_sum(input_num):
    """Return the sum of the proper divisors (the aliquot sum) of input_num."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
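    # Quick check (illustrative addition): the proper divisors of 28 are
    # 1, 2, 4, 7 and 14, which sum to 28 -- a perfect number.
    print(aliquot_sum(28))  # 28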
| 80
|
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: "
            "Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: "
            "Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
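    # Round-trip sanity check (illustrative addition): encoding then decoding
    # returns the original bytes.
    encoded = base16_encode(b"Hello")  # '48656C6C6F'
    assert base16_decode(encoded) == b"Hello"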
| 2
| 0
|
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def UpperCAmelCase ( a_ , a_="train" ) -> Optional[int]:
"""simple docstring"""
return calculate_hypothesis_value(__snake_case , __snake_case ) - output(
__snake_case , __snake_case )
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
A_ : Union[str, Any] = 0
for i in range(len(__snake_case ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCAmelCase ( a_ , a_ ) -> List[Any]:
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCAmelCase ( a_ , a_=m ) -> List[str]:
"""simple docstring"""
A_ : Any = 0
for i in range(__snake_case ):
if index == -1:
summation_value += _error(__snake_case )
else:
summation_value += _error(__snake_case ) * train_data[i][0][index]
return summation_value
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
A_ : List[str] = summation_of_cost_derivative(__snake_case , __snake_case ) / m
return cost_derivative_value
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
A_ : str = 0.000002
A_ : Union[str, Any] = 0
A_ : List[Any] = 0
while True:
j += 1
A_ : Union[str, Any] = [0, 0, 0, 0]
for i in range(0 , len(__snake_case ) ):
A_ : List[str] = get_cost_derivative(i - 1 )
A_ : int = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__snake_case , __snake_case , atol=__snake_case , rtol=__snake_case , ):
break
A_ : Optional[Any] = temp_parameter_vector
print(("""Number of iterations:""", j) )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
for i in range(len(__snake_case ) ):
print(("""Actual output value:""", output(__snake_case , """test""" )) )
print(("""Hypothesis output:""", calculate_hypothesis_value(__snake_case , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 707
|
'''simple docstring'''
from __future__ import annotations
class Matrix:
    """A rectangular matrix of ints/floats with elementary linear-algebra operations."""
    def __init__(self, rows) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self) -> bool:
        return bool(self.determinant())
    def get_minor(self, row, column) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
    def get_cofactor(self, row, column) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )
    def cofactors(self) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(_lowerCamelCase ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
    def add_row(self, row, position=None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column, position=None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
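    # Usage sketch (illustrative addition): build a 2x2 matrix and exercise a few
    # of the methods defined above.
    m = Matrix([[1, 2], [3, 4]])
    print(m.determinant())  # 1*4 - 2*3 = -2
    print(m * Matrix([[1, 0], [0, 1]]) == m)  # multiplying by the identity: True
    print((m + m).rows)  # [[2, 4], [6, 8]]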
| 385
| 0
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 10_24}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>", do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
| 91
|
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Light wrapper around a DeepSpeed config dict with ZeRO stage/offload detection."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )
        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        self._stage = self.get_value("zero_optimization.stage", -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    """Thin wrapper exposing the backward/step pair of a deepspeed engine."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 136
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 713
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)
    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 515
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
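# Illustration only (not part of the original script): read_in_q_k_v slices a
# fused qkv projection into three equal blocks; the same slicing on a toy tensor:
def _demo_split_qkv(hidden_size=4):
    in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        3 * hidden_size, hidden_size
    )
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    return query, key, value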
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny'):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small'):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small'):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base'):
            pass
        elif vit_name[4:].startswith('large'):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('huge'):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {vit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 650
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def lowerCamelCase__ ( __A :List[Any]="no" ,__A :str = default_json_config_file ,__A :bool = False ):
"""simple docstring"""
__snake_case = Path(__A )
path.parent.mkdir(parents=__A ,exist_ok=__A )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__snake_case = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__snake_case = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
__snake_case = torch.cuda.device_count()
__snake_case = num_gpus
__snake_case = False
if num_gpus > 1:
__snake_case = """MULTI_GPU"""
else:
__snake_case = """NO"""
elif is_xpu_available() and use_xpu:
__snake_case = torch.xpu.device_count()
__snake_case = num_xpus
__snake_case = False
if num_xpus > 1:
__snake_case = """MULTI_XPU"""
else:
__snake_case = """NO"""
elif is_npu_available():
__snake_case = torch.npu.device_count()
__snake_case = num_npus
__snake_case = False
if num_npus > 1:
__snake_case = """MULTI_NPU"""
else:
__snake_case = """NO"""
else:
__snake_case = 0
__snake_case = True
__snake_case = 1
__snake_case = """NO"""
__snake_case = ClusterConfig(**__A )
config.to_json_file(__A )
return path
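# Illustration only (not part of accelerate): the device-detection rule above,
# reduced to a pure function for the CUDA-only case:
def _demo_distributed_type(num_gpus: int) -> str:
    return "MULTI_GPU" if num_gpus > 1 else "NO"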
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
parser.add_argument(
"""--config_file""" ,default=__A ,help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) ,dest="""save_location""" ,)
parser.add_argument(
"""--mixed_precision""" ,choices=["""no""", """fp16""", """bf16"""] ,type=__A ,help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" ,default="""no""" ,)
    parser.set_defaults(func=default_config_command)
return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 268
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
SCREAMING_SNAKE_CASE_ = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMERS_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMERS_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
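    # Illustration only (not a real transformers utility): the core idea behind a
    # "Copied from ... with Old->New" check is that the target block must equal
    # the reference block after the substitution:
    def _demo_is_consistent(self, reference, target, old="Bert", new="TestModel"):
        return target == re.sub(old, new, reference)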
| 116
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 116
| 1
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
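# Illustration only: perplexity is exp of the mean token loss, so a mean loss of
# ln(10) corresponds to a perplexity of 10.
def _demo_perplexity(mean_loss=2.302585):
    return torch.exp(torch.tensor(mean_loss)).item()  # ~= 10.0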
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 619
|
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
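# Cross-check (illustration only): the digit loop above agrees with the
# straightforward string-based digit sum.
def _demo_check(power: int = 15) -> bool:
    return solution(power) == sum(int(d) for d in str(2**power))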
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 619
| 1
|
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
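# Worked illustration (not part of the original): the first pass over
# [4, 3, 5, 1, 2] extracts the increasing strand [4, 5], leaving [3, 1, 2]
# for the recursive call to merge.
def _demo_strand_sort():
    return strand_sort([4, 3, 5, 1, 2])  # -> [1, 2, 3, 4, 5]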
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 711
|
import os
def lowercase_ ( _A : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_A ) , _A ) ) as input_file:
lowerCamelCase__ : List[Any] = [
[int(_A ) for element in line.split("," )]
for line in input_file.readlines()
]
lowerCamelCase__ : Optional[Any] = len(_A )
lowerCamelCase__ : Union[str, Any] = len(matrix[0] )
lowerCamelCase__ : Union[str, Any] = [[-1 for _ in range(_A )] for _ in range(_A )]
for i in range(_A ):
lowerCamelCase__ : Optional[Any] = matrix[i][0]
for j in range(1 , _A ):
for i in range(_A ):
lowerCamelCase__ : int = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _A ):
lowerCamelCase__ : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowerCamelCase__ : str = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'{solution() = }')
| 5
| 0
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method='nearest', )
        hidden_states = self.conv(hidden_states)
        return hidden_states
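# Standalone sketch (illustration only) of the nearest-neighbour resize used by
# FlaxUpsample2D, doubling the spatial dims of an NHWC tensor:
def _demo_nearest_upsample():
    x = jnp.ones((1, 8, 8, 3))
    y = jax.image.resize(x, shape=(1, 16, 16, 3), method="nearest")
    return y.shape  # (1, 16, 16, 3)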
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
| 31
|
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree of the given adjacency list."""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
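# Illustration only (not part of the original module): on a weighted triangle
# 0-1 (w=1), 1-2 (w=2), 0-2 (w=3), the algorithm keeps the two lightest edges.
def _demo_prisms_algorithm():
    adjacency_list = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        adjacency_list[u].append([v, w])
        adjacency_list[v].append([u, w])
    return prisms_algorithm(adjacency_list)  # -> [(0, 1), (1, 2)]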
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 347
| 0
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
            ], outputs, )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 720
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1, string2):
    """Merge two binary strings differing in at most one position, else return False."""
    lista = list(string1)
    listb = list(string2)
    count = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(lista)
def check(binary):
    pi = []
    while True:
        checka = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    checka[i] = "*"
                    checka[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if checka[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable, minterms):
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
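# Illustration only: decimal_to_binary(3, [5]) yields ["101"], the 3-variable
# binary representation of minterm 5.
def _demo_decimal_to_binary():
    return decimal_to_binary(3, [5])  # -> ["101"]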
def is_for_table(string1, string2, count):
    lista = list(string1)
    listb = list(string2)
    count_n = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection(chart, prime_implicants):
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants, binary):
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main():
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 54
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 104
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 599
| 0
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else 'train'
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
| 706
|
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int):
    """Build and simulate a quantum half adder for the two input bits."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
__lowercase = half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
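# Expected outcome (assuming qiskit's bit ordering, classical bit 1 = AND, bit 0 = XOR):
# half_adder(1, 1) should report counts of {'10': 1000}, i.e. sum = 0 with carry = 1.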
| 296
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class a ( unittest.TestCase ):
    """simple docstring"""

    def setUp( self ) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def tearDown( self ) -> None:
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features( self ) -> None:
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )

    def test_image_processor( self ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def test_tokenizer( self ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor( self ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode( self ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 63
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
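# Dispatch note: each register_subcommand attaches a `func` factory to its subparser, so
# e.g. `transformers-cli env` builds an EnvironmentCommand instance and calls run() on it.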
| 63
| 1
|
"""simple docstring"""
def __lowerCamelCase ( a ,b ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
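# Hedged usage note: this is the classic "abbreviation" DP. __lowerCamelCase("daBcd", "ABC")
# should return True (capitalize 'a' and 'c', drop the lowercase 'd's), while
# __lowerCamelCase("dBcd", "ABC") should return False.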
if __name__ == "__main__":
import doctest
doctest.testmod()
| 554
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class snake_case_ ( PretrainedConfig ):
    """simple docstring"""

    model_type = """timm_backbone"""

    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
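# Minimal usage sketch (assumes `timm` is installed; "resnet50" is an illustrative backbone):
#   config = snake_case_(backbone="resnet50")  # out_indices defaults to (-1,), the last stage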
| 554
| 1
|
import numpy as np
def a ( vector : np.array ) -> np.array:
"""simple docstring"""
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
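# Sanity check: the expression above is algebraically tanh, so a(np.array([0.0])) gives
# array([0.]) and np.allclose(a(np.array([1.0])), np.tanh(1.0)) holds.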
| 291
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __lowerCAmelCase ( BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
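# Optional-dependency note: the guarded import above lets this module be imported even when
# torch or transformers is missing; only the output dataclass is unconditionally available.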
| 291
| 1
|
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Optional[Any] =logging.get_logger(__name__)
def load_tfa_weights_in_bert(model , config , tf_checkpoint_path ):
    """simple docstring"""
    tf_path = os.path.abspath(tf_checkpoint_path )
    logger.info(F"Converting TensorFlow checkpoint from {tf_path}" )
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path )
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("""/""" )
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(F"Skipping non-model layer {full_name}" )
            continue
        if "optimizer" in full_name:
            logger.info(F"Skipping optimization layer {full_name}" )
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("""layer_with_weights""" ):
                depth += 1
            else:
                break
        layer_depth.append(depth )
        # read data
        array = tf.train.load_variable(tf_path , full_name )
        names.append("""/""".join(name ) )
        arrays.append(array )
    logger.info(F"Read a total of {len(arrays ):,} layers" )
    # Sanity check
    if len(set(layer_depth ) ) != 1:
        raise ValueError(F"Found layer names with different depths (layer depth {list(set(layer_depth ) )})" )
    layer_depth = list(set(layer_depth ) )[0]
    if layer_depth != 1:
        raise ValueError(
            """The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
            """ heads.""" )
    # convert layers
    logger.info("""Converting weights...""" )
    for full_name, array in zip(names , arrays ):
        name = full_name.split("""/""" )
        pointer = model
        trace = []
        for i, m_name in enumerate(name ):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("""layer_with_weights""" ):
                layer_num = int(m_name.split("""-""" )[-1] )
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["""embeddings""", """LayerNorm"""] )
                    pointer = getattr(pointer , """embeddings""" )
                    pointer = getattr(pointer , """LayerNorm""" )
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
                    pointer = getattr(pointer , """encoder""" )
                    pointer = getattr(pointer , """layer""" )
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["""pooler""", """dense"""] )
                    pointer = getattr(pointer , """pooler""" )
                    pointer = getattr(pointer , """dense""" )
            elif m_name == "embeddings":
                trace.append("""embeddings""" )
                pointer = getattr(pointer , """embeddings""" )
                if layer_num == 0:
                    trace.append("""word_embeddings""" )
                    pointer = getattr(pointer , """word_embeddings""" )
                elif layer_num == 1:
                    trace.append("""position_embeddings""" )
                    pointer = getattr(pointer , """position_embeddings""" )
                elif layer_num == 2:
                    trace.append("""token_type_embeddings""" )
                    pointer = getattr(pointer , """token_type_embeddings""" )
                else:
                    raise ValueError(F"Unknown embedding layer with name {full_name}" )
                trace.append("""weight""" )
                pointer = getattr(pointer , """weight""" )
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["""attention""", """self"""] )
                pointer = getattr(pointer , """attention""" )
                pointer = getattr(pointer , """self""" )
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["""attention""", """output""", """LayerNorm"""] )
                pointer = getattr(pointer , """attention""" )
                pointer = getattr(pointer , """output""" )
                pointer = getattr(pointer , """LayerNorm""" )
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["""attention""", """output""", """dense"""] )
                pointer = getattr(pointer , """attention""" )
                pointer = getattr(pointer , """output""" )
                pointer = getattr(pointer , """dense""" )
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["""output""", """dense"""] )
                pointer = getattr(pointer , """output""" )
                pointer = getattr(pointer , """dense""" )
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["""output""", """LayerNorm"""] )
                pointer = getattr(pointer , """output""" )
                pointer = getattr(pointer , """LayerNorm""" )
            elif m_name == "_key_dense":
                # attention key
                trace.append("""key""" )
                pointer = getattr(pointer , """key""" )
            elif m_name == "_query_dense":
                # attention query
                trace.append("""query""" )
                pointer = getattr(pointer , """query""" )
            elif m_name == "_value_dense":
                # attention value
                trace.append("""value""" )
                pointer = getattr(pointer , """value""" )
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["""intermediate""", """dense"""] )
                pointer = getattr(pointer , """intermediate""" )
                pointer = getattr(pointer , """dense""" )
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("""output""" )
                pointer = getattr(pointer , """output""" )
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("""bias""" )
                pointer = getattr(pointer , """bias""" )
            elif m_name in ["kernel", "gamma"]:
                trace.append("""weight""" )
                pointer = getattr(pointer , """weight""" )
            else:
                logger.warning(F"Ignored {m_name}" )
        # for certain layers reshape is necessary
        trace = """.""".join(trace )
        if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , trace ) or re.match(
            R"""(\S+)\.attention\.output\.dense\.weight""" , trace ):
            array = array.reshape(pointer.data.shape )
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array )
        else:
            raise ValueError(
                F"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                F" {array.shape}" )
        logger.info(F"Successfully set variable {full_name} to PyTorch layer {trace}" )
    return model


def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path , config_path , pytorch_dump_path ):
    """simple docstring"""
    logger.info(F"Loading model based on config from {config_path}..." )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(F"Loading weights from checkpoint {tf_checkpoint_path}..." )
    load_tfa_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    logger.info(F"Saving PyTorch model to {pytorch_dump_path}..." )
    torch.save(model.state_dict() , pytorch_dump_path )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        type=str,
        required=True,
        help='The config json file corresponding to the BERT model. This specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path',
        type=str,
        required=True,
        help='Path to the output PyTorch model (must include filename).',
    )
    args = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 720
|
'''simple docstring'''
def miller_rabin( n : int , allow_probable : bool = False ) -> bool:
    """simple docstring"""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
        raise ValueError(
            """Warning: upper bound of deterministic test is exceeded. """
            """Pass allow_probable=True to allow probabilistic test. """
            """A return value of True indicates a probable prime.""" )
    # array bounds provided by analysis
    bounds = [
        2047,
        137_3653,
        2532_6001,
        32_1503_1751,
        2_1523_0289_8747,
        3_4747_4966_0383,
        341_5500_7172_8321,
        1,
        382_5123_0565_4641_3051,
        1,
        1,
        3186_6585_7834_0311_5116_7461,
        3_3170_4406_4679_8873_8596_1981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
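# Quick examples: miller_rabin(97) returns True, while miller_rabin(561) (a Carmichael
# number that fools the plain Fermat test) is correctly rejected by these witness bases.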
| 499
| 0
|
"""simple docstring"""
from math import sqrt
def is_prime( number ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution( nth = 10_001 ) -> int:
    '''simple docstring'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
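# Example: solution(6) returns 13, the sixth prime (2, 3, 5, 7, 11, 13).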
| 337
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_rembert"""] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_rembert"""] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 337
| 1
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name : str , save_dir : str , **config_kwargs ):
    config = AutoConfig.from_pretrained(config_name , **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
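# Example invocation via fire (hypothetical script name and paths; --d_model is a T5 kwarg):
#   python save_randomly_initialized.py t5-small ./t5-small-random --d_model=64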
| 449
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case ( unittest.TestCase):
    def test_text_streamer_matches_non_streaming( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text )

    def test_iterator_streamer_matches_non_streaming( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs )
        thread.start()
        streamer_text = ''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text )

    def test_text_streamer_skip_prompt( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True )
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text )

    def test_text_streamer_decode_kwargs( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('distilgpt2' )
        model = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True )
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors='pt' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1) )

    def test_iterator_streamer_timeout( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001 )
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = ''
            for new_text in streamer:
                streamer_text += new_text
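# Pattern note: running generate() on a background Thread while the main thread iterates a
# TextIteratorStreamer is the standard way to consume tokens as they are produced.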
| 449
| 1
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max = True , max_x = math.inf , min_x = -math.inf , max_y = math.inf , min_y = -math.inf , visualization = False , start_temperate = 100 , rate_of_decrease = 0.01 , threshold_temp = 1 , ):
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations ) , scores )
        plt.xlabel("Iterations" )
        plt.ylabel("Function values" )
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_fa(x , y ):
        """simple docstring"""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f"""and 50 > y > - 5 found via hill climbing: {local_max.score()}"""
    )

    def test_fa(x , y ):
        """simple docstring"""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f"""{local_min.score()}"""
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f"""{local_max.score()}"""
    )
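# Design note: worse neighbors are accepted with probability e ** (change / current_temp);
# as the temperature decays that probability shrinks, so the search gradually turns greedy.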
| 620
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class VisionEncoderDecoderConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''vision-encoder-decoder'''
    is_composition = True

    def __init__( self ,**kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F"""A configuraton of type {self.model_type} cannot be instantiated because """
                F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        self.encoder = AutoConfig.for_model(encoder_model_type ,**encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type ,**decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs( cls ,encoder_config ,decoder_config ,**kwargs ):
        '''simple docstring'''
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**kwargs )

    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig( OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4

    @property
    def outputs( self ):
        '''simple docstring'''
        return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )


class VisionEncoderDecoderDecoderOnnxConfig( OnnxConfig ):
    '''simple docstring'''

    @property
    def inputs( self ):
        '''simple docstring'''
        common_inputs = OrderedDict()
        common_inputs["""input_ids"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""encoder_hidden_states"""] = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs

    def generate_dummy_inputs( self ,tokenizer ,batch_size = -1 ,seq_length = -1 ,is_pair = False ,framework = None ,):
        '''simple docstring'''
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        batch, encoder_sequence = dummy_input["""input_ids"""].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["""input_ids"""] = dummy_input.pop("""input_ids""" )
        common_inputs["""attention_mask"""] = dummy_input.pop("""attention_mask""" )
        common_inputs["""encoder_hidden_states"""] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs


class VisionEncoderDecoderOnnxConfig( OnnxConfig ):
    '''simple docstring'''

    @property
    def inputs( self ):
        '''simple docstring'''
        pass

    def get_encoder_config( self ,encoder_config ):
        '''simple docstring'''
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )

    def get_decoder_config( self ,encoder_config ,decoder_config ,feature = "default" ):
        '''simple docstring'''
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config ,feature )
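# A minimal usage sketch (assuming ViT + GPT-2 sub-configs; illustrative only):
#   from transformers import ViTConfig, GPT2Config
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())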
| 36
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """simple docstring"""

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_saved_model_creation_extended( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , "use_cache" ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , "saved_model" , "1" )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(model )

    def test_attention_outputs( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , "key_length" , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )

        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )


@require_tf
class TFConvBertModelIntegrationTest( unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 221
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _a ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Tuple = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: List[str] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
UpperCamelCase__: Tuple = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: List[str] = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCamelCase__: int = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: List[Any] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCamelCase__: Optional[int] = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
UpperCamelCase__: Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Tuple = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase__: Union[str, Any] = "fp16"
self.assertFalse(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: str = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
UpperCamelCase__: List[Any] = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: Dict = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
UpperCamelCase__: Dict = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Any = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# Removed: 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCamelCase__: Optional[int] = "fp16"
self.assertFalse(is_safetensors_compatible(__lowerCamelCase , variant=__lowerCamelCase ) )
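# The rule the cases above exercise: is_safetensors_compatible should return
# True only when every ".bin" weight file has a ".safetensors" counterpart for
# the requested variant. A minimal sketch of that name mapping (an
# illustration with a hypothetical helper, not the library's actual code):
#
# def _expected_safetensors_name(bin_name: str) -> str:
#     folder, _, filename = bin_name.rpartition("/")
#     if filename.startswith("pytorch_model"):  # transformers-style weights
#         filename = filename.replace("pytorch_model", "model")
#     return f"{folder}/{filename}".replace(".bin", ".safetensors")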
| 221
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : str = "visual_bert"
def __init__( self : Optional[Any] , A__ : Optional[int]=3_05_22 , A__ : Optional[int]=7_68 , A__ : Dict=5_12 , A__ : int=12 , A__ : Union[str, Any]=12 , A__ : Any=30_72 , A__ : Dict="gelu" , A__ : Optional[Any]=0.1 , A__ : Optional[Any]=0.1 , A__ : Tuple=5_12 , A__ : Optional[int]=2 , A__ : int=0.02 , A__ : Optional[int]=1E-12 , A__ : Optional[int]=False , A__ : List[str]=True , A__ : Optional[Any]=1 , A__ : Optional[int]=0 , A__ : int=2 , **A__ : int , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
snake_case_ : str = vocab_size
snake_case_ : str = max_position_embeddings
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = visual_embedding_dim
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : Tuple = intermediate_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Union[str, Any] = attention_probs_dropout_prob
snake_case_ : List[str] = initializer_range
snake_case_ : Tuple = type_vocab_size
snake_case_ : Optional[Any] = layer_norm_eps
snake_case_ : List[Any] = bypass_transformer
snake_case_ : int = special_visual_initialize
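# A minimal usage sketch for the config above (assuming the standard
# transformers PretrainedConfig API; VisualBertConfig is the upstream name of
# this class, and the values shown are its defaults):
#
# config = VisualBertConfig()
# config.hidden_size            # 768
# config.visual_embedding_dim   # 512
# config.save_pretrained("./visual_bert")  # round-trips through config.json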
| 666
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    # pytest injects its built-in monkeypatch fixture by parameter name.
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        # method name inferred from how datasets.list_metrics queries the hub client
        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    # the deprecated metrics API emits a FutureWarning pointing users to evaluate
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 666
| 1
|
def circle_sort(collection):
    """Sort a list in place using circle sort and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
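# Example run, assuming the definitions above:
# >>> circle_sort([5, 2, 9, 1, 4])
# [1, 2, 4, 5, 9]
# circle_sort swaps elements in place and returns the same list object,
# repeating whole passes until a pass makes no swap.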
| 325
|
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph, source_vertex):
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        """Run breadth first search from the source vertex, filling self.parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex):
        """Return the source-to-target path as a '->'-separated string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
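# Expected output for the graph above with source vertex "G":
#   G->C->A->B->D    (g.shortest_path("D"))
#   G                (the source vertex is returned as-is)
# g.shortest_path("Foo") then raises ValueError, since "Foo" is not reachable.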
| 325
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class a__ :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : str , ) -> Optional[int]:
__A= parent
__A= 13
__A= 7
__A= True
__A= True
__A= False
__A= True
__A= 99
__A= 32
__A= 2
__A= 4
__A= 37
__A= 'gelu'
__A= 0.1
__A= 0.1
__A= 512
__A= 16
__A= 2
__A= 0.02
__A= 3
__A= 4
__A= None
def lowerCAmelCase ( self : Optional[Any] ) -> str:
__A= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A= None
if self.use_input_mask:
__A= random_attention_mask([self.batch_size, self.seq_length] )
__A= None
__A= None
__A= None
if self.use_labels:
__A= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A= ids_tensor([self.batch_size] , self.num_choices )
__A= DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] ) -> Any:
__A= TFDistilBertModel(config=lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
__A= [input_ids, input_mask]
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ) -> Optional[int]:
__A= TFDistilBertForMaskedLM(config=lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ) -> int:
__A= TFDistilBertForQuestionAnswering(config=lowerCAmelCase_ )
__A= {
'input_ids': input_ids,
'attention_mask': input_mask,
}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
__A= self.num_labels
__A= TFDistilBertForSequenceClassification(lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int ) -> Optional[Any]:
__A= self.num_choices
__A= TFDistilBertForMultipleChoice(lowerCAmelCase_ )
__A= tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
__A= tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
__A= {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict:
__A= self.num_labels
__A= TFDistilBertForTokenClassification(lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
__A= self.prepare_config_and_inputs()
((__A), (__A), (__A), (__A), (__A), (__A))= config_and_inputs
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a__ ( a_ , a_ , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
A : Optional[int] = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A : str = False
A : List[Any] = False
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__A= TFDistilBertModelTester(self )
__A= ConfigTester(self , config_class=lowerCAmelCase_ , dim=37 )
def lowerCAmelCase ( self : Dict ) -> Tuple:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase_ )
def lowerCAmelCase ( self : str ) -> Any:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : int ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__A= TFDistilBertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Any ) -> List[Any]:
__A= TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
__A= tf.constant([[0, 1, 2, 3, 4, 5]] )
__A= model(lowerCAmelCase_ )[0]
__A= [1, 6, 768]
self.assertEqual(output.shape , lowerCAmelCase_ )
__A= tf.constant(
[
[
[0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99],
[0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04],
[0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 )
| 186
|
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return a set of products, one for each way to write number_to_partition as a sum of primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest number that can be written as a sum of primes in more than number_unique_partitions ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 186
| 1
|
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of the whitespace-stripped content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking for keywords in the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1) looking for keywords in the first few lines of the file,
    2) counting occurrences of 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer defined below."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is filled only once."""
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and keyword-free files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Compress a file with gzip, then remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# (not sure this is the right place to save it)
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 716
|
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase, taking 1 or 2 steps at a time."""
    assert isinstance(number_of_steps, int) and number_of_steps > 0, (
        f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    )
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
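# Worked example: climb_stairs(3) == 3, via the step sequences 1+1+1, 1+2 and
# 2+1; the loop is the Fibonacci recurrence (1, 1) -> (2, 1) -> (3, 2).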
| 543
| 0
|
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('.')
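# Example of the markdown produced, assuming the helpers above:
#   md_prefix(0) -> "\n##"   (a new section heading)
#   md_prefix(1) -> "  *"    (a bullet one level deep)
# so "sorts/quick_sort.py" renders under a "## Sorts" heading as
# "  * [Quick Sort](sorts/quick_sort.py)".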
| 213
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
_UpperCamelCase : int = "ssube/stable-diffusion-x4-upscaler-onnx"
def __A ( self , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) )
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
_lowerCAmelCase : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Dict = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**a__ ).images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**a__ ).images
_lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[str] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):
@property
def __A ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __A ( self ):
_lowerCAmelCase : str = ort.SessionOptions()
_lowerCAmelCase : Tuple = False
return options
def __A ( self ):
_lowerCAmelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase : Any = init_image.resize((128, 128) )
# using the PNDM scheduler by default
_lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : Dict = pipe(
prompt=a__ , image=a__ , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : List[str] = output.images
_lowerCAmelCase : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def __A ( self ):
_lowerCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase : Union[str, Any] = init_image.resize((128, 128) )
_lowerCAmelCase : int = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
_lowerCAmelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[Any] = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase : List[Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(
prompt=a__ , image=a__ , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase : Optional[Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 213
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__lowercase = get_tests_dir('''fixtures''')
__lowercase = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__lowercase = get_tests_dir('''fixtures/dummy-config.json''')
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :str = 0
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[Any] = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''')
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Optional[int] = AutoFeatureExtractor.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase :Tuple = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__UpperCamelCase :List[Any] = AutoFeatureExtractor.from_pretrained(__lowercase).to_dict()
config_dict.pop('''feature_extractor_type''')
__UpperCamelCase :str = WavaVecaFeatureExtractor(**__lowercase)
# save in new folder
model_config.save_pretrained(__lowercase)
config.save_pretrained(__lowercase)
__UpperCamelCase :Any = AutoFeatureExtractor.from_pretrained(__lowercase)
# make sure private variable is not incorrectly saved
__UpperCamelCase :int = json.loads(config.to_json_string())
self.assertTrue('''_processor_class''' not in dict_as_saved)
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Tuple = AutoFeatureExtractor.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> List[str]:
with self.assertRaisesRegex(
__lowercase , '''bert-base is not a local folder and is not a valid model identifier'''):
__UpperCamelCase :Union[str, Any] = AutoFeatureExtractor.from_pretrained('''bert-base''')
def UpperCamelCase__ ( self) -> Any:
with self.assertRaisesRegex(
__lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
__UpperCamelCase :Any = AutoFeatureExtractor.from_pretrained(__lowercase , revision='''aaaaaa''')
def UpperCamelCase__ ( self) -> List[str]:
with self.assertRaisesRegex(
__lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__UpperCamelCase :Union[str, Any] = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''')
def UpperCamelCase__ ( self) -> List[Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase):
__UpperCamelCase :List[Any] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase):
__UpperCamelCase :Dict = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__lowercase)
__UpperCamelCase :Tuple = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__lowercase)
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__lowercase)
__UpperCamelCase :Union[str, Any] = AutoFeatureExtractor.from_pretrained(__lowercase , trust_remote_code=__lowercase)
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
def UpperCamelCase__ ( self) -> Any:
try:
AutoConfig.register('''custom''' , __lowercase)
AutoFeatureExtractor.register(__lowercase , __lowercase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase):
AutoFeatureExtractor.register(__lowercase , __lowercase)
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase :Union[str, Any] = CustomFeatureExtractor.from_pretrained(__lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__lowercase)
__UpperCamelCase :Dict = AutoFeatureExtractor.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self) -> str:
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Any = True
try:
AutoConfig.register('''custom''' , __lowercase)
AutoFeatureExtractor.register(__lowercase , __lowercase)
# If remote code is not set, the default is to use local
__UpperCamelCase :Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''')
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
self.assertTrue(feature_extractor.is_local)
# If remote code is disabled, we load the local one.
__UpperCamelCase :Tuple = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__lowercase)
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
self.assertTrue(feature_extractor.is_local)
# If remote is enabled, we load from the Hub
__UpperCamelCase :Optional[int] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__lowercase)
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
self.assertTrue(not hasattr(__lowercase , '''is_local'''))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 452
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : List[Any] = """wavlm"""
def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=320 , __lowercase=800 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="mean" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=(512, 512, 512, 512, 1_500) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=512 , __lowercase=80 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , **__lowercase , ) -> Dict:
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
__UpperCamelCase :Any = hidden_size
__UpperCamelCase :Tuple = feat_extract_norm
__UpperCamelCase :List[str] = feat_extract_activation
__UpperCamelCase :int = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :Union[str, Any] = list(__lowercase)
__UpperCamelCase :Optional[Any] = conv_bias
__UpperCamelCase :Tuple = num_buckets
__UpperCamelCase :Optional[int] = max_bucket_distance
__UpperCamelCase :Union[str, Any] = num_conv_pos_embeddings
__UpperCamelCase :Optional[Any] = num_conv_pos_embedding_groups
__UpperCamelCase :List[Any] = len(self.conv_dim)
__UpperCamelCase :Tuple = num_hidden_layers
__UpperCamelCase :str = intermediate_size
__UpperCamelCase :Union[str, Any] = hidden_act
__UpperCamelCase :Optional[int] = num_attention_heads
__UpperCamelCase :str = hidden_dropout
__UpperCamelCase :int = attention_dropout
__UpperCamelCase :Optional[int] = activation_dropout
__UpperCamelCase :str = feat_proj_dropout
__UpperCamelCase :List[Any] = final_dropout
__UpperCamelCase :int = layerdrop
__UpperCamelCase :List[Any] = layer_norm_eps
__UpperCamelCase :Optional[int] = initializer_range
__UpperCamelCase :Any = num_ctc_classes
__UpperCamelCase :Optional[int] = vocab_size
__UpperCamelCase :List[Any] = do_stable_layer_norm
__UpperCamelCase :str = use_weighted_layer_sum
__UpperCamelCase :Any = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase :Union[str, Any] = apply_spec_augment
__UpperCamelCase :Optional[Any] = mask_time_prob
__UpperCamelCase :Union[str, Any] = mask_time_length
__UpperCamelCase :Optional[int] = mask_time_min_masks
__UpperCamelCase :str = mask_feature_prob
__UpperCamelCase :Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
__UpperCamelCase :Optional[Any] = num_codevectors_per_group
__UpperCamelCase :List[Any] = num_codevector_groups
__UpperCamelCase :str = contrastive_logits_temperature
__UpperCamelCase :Tuple = num_negatives
__UpperCamelCase :Any = codevector_dim
__UpperCamelCase :Union[str, Any] = proj_codevector_dim
__UpperCamelCase :Tuple = diversity_loss_weight
# ctc loss
__UpperCamelCase :int = ctc_loss_reduction
__UpperCamelCase :Any = ctc_zero_infinity
# adapter
__UpperCamelCase :List[Any] = add_adapter
__UpperCamelCase :Dict = adapter_kernel_size
__UpperCamelCase :Any = adapter_stride
__UpperCamelCase :Optional[int] = num_adapter_layers
__UpperCamelCase :Union[str, Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCamelCase :int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase :Optional[Any] = list(__lowercase)
__UpperCamelCase :Optional[Any] = list(__lowercase)
__UpperCamelCase :List[str] = list(__lowercase)
__UpperCamelCase :List[Any] = xvector_output_dim
@property
def UpperCamelCase__ ( self) -> Any:
return functools.reduce(operator.mul , self.conv_stride , 1)
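# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above
# evaluates to 5 * 2**6 == 320: the feature extractor's total downsampling
# factor, i.e. how many input audio samples map to one frame of features.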
| 452
| 1
|
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen consecutive digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
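# The same sliding-window idea at a small scale, assuming solution() above:
# in "123456" the largest product of three adjacent digits is 4 * 5 * 6 == 120;
# solution() slides a window of 13 digits over the 1000-digit constant N.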
| 32
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]; implementations override this."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
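# A minimal usage sketch with a hypothetical pass-through filter; any object
# with a matching process() method satisfies the FilterType protocol:
#
# class IdentityFilter:
#     def process(self, sample: float) -> float:
#         return sample
#
# show_frequency_response(IdentityFilter(), 48000)  # flat line at 0 dB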
| 597
| 0
|
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : List[str] ) -> Any:
"""simple docstring"""
assert isinstance(__lowerCAmelCase, __lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""", [False, True] )
def __lowerCamelCase ( __snake_case : int, __snake_case : Dict, __snake_case : Tuple ) -> Any:
"""simple docstring"""
A__ : List[Any] =tmp_path / """cache"""
A__ : str ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ : Optional[int] =ParquetDatasetReader(__lowerCAmelCase, cache_dir=__lowerCAmelCase, keep_in_memory=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase, __lowerCAmelCase )
@pytest.mark.parametrize(
"""features""", [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
], )
def __lowerCamelCase ( __snake_case : int, __snake_case : List[str], __snake_case : Any ) -> List[Any]:
"""simple docstring"""
A__ : int =tmp_path / """cache"""
A__ : List[str] ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A__ : int =features.copy() if features else default_expected_features
A__ : List[str] =(
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ : List[Any] =ParquetDatasetReader(__lowerCAmelCase, features=__lowerCAmelCase, cache_dir=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase, __lowerCAmelCase )
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] )
def __lowerCamelCase ( __snake_case : List[str], __snake_case : Optional[int], __snake_case : Optional[int] ) -> str:
"""simple docstring"""
A__ : Union[str, Any] =tmp_path / """cache"""
A__ : Optional[int] ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A__ : Optional[int] =ParquetDatasetReader(__lowerCAmelCase, cache_dir=__lowerCAmelCase, split=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase, __lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""", [str, list] )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : Any ) -> Tuple:
"""simple docstring"""
if issubclass(__lowerCAmelCase, __lowerCAmelCase ):
A__ : Dict =parquet_path
elif issubclass(__lowerCAmelCase, __lowerCAmelCase ):
A__ : int =[parquet_path]
A__ : List[str] =tmp_path / """cache"""
A__ : List[Any] ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A__ : str =ParquetDatasetReader(__lowerCAmelCase, cache_dir=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase, __lowerCAmelCase )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[int], __snake_case : Tuple=("train",) ) -> Dict:
"""simple docstring"""
assert isinstance(__lowerCAmelCase, __lowerCAmelCase )
for split in splits:
A__ : List[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""", [False, True] )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[int], __snake_case : str ) -> str:
"""simple docstring"""
A__ : List[Any] =tmp_path / """cache"""
A__ : int ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ : str =ParquetDatasetReader(
{"""train""": parquet_path}, cache_dir=__lowerCAmelCase, keep_in_memory=__lowerCAmelCase ).read()
_check_parquet_datasetdict(__lowerCAmelCase, __lowerCAmelCase )
@pytest.mark.parametrize(
"""features""", [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
], )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : str, __snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict =tmp_path / """cache"""
A__ : List[str] ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A__ : List[Any] =features.copy() if features else default_expected_features
A__ : Tuple =(
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ : int =ParquetDatasetReader({"""train""": parquet_path}, features=__lowerCAmelCase, cache_dir=__lowerCAmelCase ).read()
_check_parquet_datasetdict(__lowerCAmelCase, __lowerCAmelCase )
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Any, __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if split:
A__ : Optional[Any] ={split: parquet_path}
else:
A__ : Union[str, Any] ="""train"""
A__ : str ={"""train""": parquet_path, """test""": parquet_path}
A__ : Any =tmp_path / """cache"""
A__ : str ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A__ : Optional[Any] =ParquetDatasetReader(__lowerCAmelCase, cache_dir=__lowerCAmelCase ).read()
_check_parquet_datasetdict(__lowerCAmelCase, __lowerCAmelCase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCamelCase ( __snake_case : Dict, __snake_case : List[str] ) -> int:
"""simple docstring"""
A__ : List[str] =ParquetDatasetWriter(__lowerCAmelCase, tmp_path / """foo.parquet""" )
assert writer.write() > 0
A__ : Tuple =pq.ParquetFile(tmp_path / """foo.parquet""" )
A__ : List[str] =pf.read()
assert dataset.data.table == output_table
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Union[str, Any] ) -> Any:
"""simple docstring"""
A__ : Tuple =str(shared_datadir / """test_image_rgb.jpg""" )
A__ : List[str] ={"""image""": [image_path]}
A__ : str =Features({"""image""": Image()} )
A__ : List[Any] =Dataset.from_dict(__lowerCAmelCase, features=__lowerCAmelCase )
A__ : str =ParquetDatasetWriter(__lowerCAmelCase, tmp_path / """foo.parquet""" )
assert writer.write() > 0
A__ : int =Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
A__ : str =ParquetDatasetReader(str(tmp_path / """foo.parquet""" ), streaming=__lowerCAmelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""", [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __lowerCamelCase ( __snake_case : List[str], __snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
assert get_writer_batch_size(__lowerCAmelCase ) == expected
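These tests exercise ParquetDatasetReader and ParquetDatasetWriter end to end. A minimal round-trip sketch using the public `datasets` API looks like the following; it assumes `datasets` and `pyarrow` are installed, and the file path is purely illustrative:

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
ds.to_parquet("example.parquet")  # write the dataset out as a parquet file
reloaded = Dataset.from_parquet("example.parquet")  # read it back in
assert reloaded.column_names == ["col_1", "col_2", "col_3"]
assert reloaded.num_rows == 2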
| 712
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __lowerCamelCase ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : nn.Module , lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
super().__init__()
A__ : Union[str, Any] =module
A__ : Union[str, Any] =nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase_ , bias=lowerCAmelCase_ ) , nn.Linear(lowerCAmelCase_ , module.out_features , bias=lowerCAmelCase_ ) , )
A__ : Tuple =(2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Dict:
'''simple docstring'''
return self.module(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) + self.adapter(lowerCAmelCase_ )
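The class above is the standard LoRA adapter pattern: a frozen base linear module plus a trainable low-rank residual path, out = base(x) + B(A(x)), where A is normally initialised and B starts at zero so the wrapped model is unchanged at step 0. A hedged, self-contained sketch of the same idea with explicit names (the names are ours, not the file's):

import torch.nn as nn

class SimpleLoRA(nn.Module):
    def __init__(self, base: nn.Linear, rank: int = 16):
        super().__init__()
        self.base = base
        self.down = nn.Linear(base.in_features, rank, bias=False)  # A: project down to rank
        self.up = nn.Linear(rank, base.out_features, bias=False)   # B: project back up
        std = (2.0 / (5 * min(base.in_features, base.out_features))) ** 0.5
        nn.init.normal_(self.down.weight, std=std)
        nn.init.zeros_(self.up.weight)  # zero init => identity behaviour at step 0

    def forward(self, x):
        return self.base(x) + self.up(self.down(x))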
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 'bigscience/bloom-1b7'
# Constant values
__snake_case = 2.109659552692574
__snake_case = 'Hello my name is'
__snake_case = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__snake_case = 10
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
# Models and tokenizer
A__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : str =self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
A__ : Union[str, Any] =config.to_dict()
A__ : Any =config.to_diff_dict()
A__ : Optional[Any] =config.to_json_string()
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
A__ : int =self.model_fpaa.get_memory_footprint()
A__ : Optional[Any] =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A__ : Tuple =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : int =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Union[str, Any] =self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
A__ : Tuple =True
A__ : Optional[int] =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
A__ : Union[str, Any] =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
A__ : Dict =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" )
A__ : Optional[Any] =self.model_fpaa.to(torch.floataa )
A__ : Dict =self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A__ : List[str] =self.model_fpaa.half()
# Check this does not throw an error
A__ : int =self.model_fpaa.float()
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
A__ : Dict =AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
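In plain form, what this test class exercises is 4-bit loading through BitsAndBytesConfig followed by generation. A hedged sketch of that flow (it needs a CUDA GPU plus the bitsandbytes and accelerate packages; the model id is the one the tests use):

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(load_in_4bit=True)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
)
inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))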
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowercase__ ( cls : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Tuple ="""t5-small"""
A__ : Optional[Any] ="""google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A__ : Optional[int] =AutoTokenizer.from_pretrained(cls.model_name )
A__ : Optional[int] ="""Translate in German: Hello, my dog is cute"""
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
A__ : Optional[int] =TaForConditionalGeneration._keep_in_fpaa_modules
A__ : Optional[Any] =None
# test with `t5-small`
A__ : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : List[str] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Optional[Any] =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : List[str] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Tuple =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Union[str, Any] =model.generate(**lowerCAmelCase_ )
A__ : Dict =modules
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A__ : Optional[int] =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A__ : Dict =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Any =model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
A__ : Union[str, Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
A__ : Optional[int] =self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A__ : Dict =model.generate(**lowerCAmelCase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
super().setUp()
# model_name
A__ : Any ="""bigscience/bloom-560m"""
A__ : List[Any] ="""t5-small"""
# Different types of model
A__ : Dict =AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
A__ : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
A__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
A__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
A__ : Dict =pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
super().setUp()
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : int =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A__ : str =self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
A__ : Any =model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] ="""facebook/opt-350m"""
super().setUp()
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A__ : int =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A__ : Dict =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
A__ : int =LoRALayer(module.q_proj , rank=16 )
A__ : Any =LoRALayer(module.k_proj , rank=16 )
A__ : Union[str, Any] =LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A__ : List[Any] =self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A__ : Any =model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'gpt2-xl'
__snake_case = 3.3191854854152187
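The training test above follows the usual k-bit fine-tuning recipe: freeze the quantised base model, cast one-dimensional parameters such as layer norms to fp32 for stability, attach trainable adapters, and backpropagate only through those. A hedged sketch of the freeze-and-cast step for any loaded model (illustrative, not the tests' exact code):

import torch

def prepare_for_kbit_training(model: torch.nn.Module) -> torch.nn.Module:
    for param in model.parameters():
        param.requires_grad = False  # freeze the quantised weights
        if param.ndim == 1:  # layer norms and biases: keep in fp32 for stability
            param.data = param.data.to(torch.float32)
    return model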
| 687
| 0
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCamelCase_ = "CompVis/stable-diffusion-v1-1"
UpperCamelCase_ = "CompVis/stable-diffusion-v1-2"
UpperCamelCase_ = "CompVis/stable-diffusion-v1-3"
UpperCamelCase_ = "CompVis/stable-diffusion-v1-4"
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A, A, A, A, A, A, A = True, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionPipeline.from_pretrained(A )
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionPipeline.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = StableDiffusionPipeline.from_pretrained(A )
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline(
vae=A, text_encoder=A, tokenizer=A, unet=A, scheduler=A, safety_checker=A, feature_extractor=A, requires_safety_checker=A, )
self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {k: getattr(self, A ) for k in self.config.keys() if not k.startswith('_' )}
def UpperCamelCase_ ( self, A = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.enable_attention_slicing(A )
@torch.no_grad()
def UpperCamelCase_ ( self, A, A = 512, A = 512, A = 50, A = 7.5, A = None, A = 1, A = 0.0, A = None, A = None, A = "pil", A = True, A = None, A = 1, **A, ):
'''simple docstring'''
return self.pipea(
prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, )
@torch.no_grad()
def UpperCamelCase_ ( self, A, A = 512, A = 512, A = 50, A = 7.5, A = None, A = 1, A = 0.0, A = None, A = None, A = "pil", A = True, A = None, A = 1, **A, ):
'''simple docstring'''
return self.pipea(
prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, )
@torch.no_grad()
def UpperCamelCase_ ( self, A, A = 512, A = 512, A = 50, A = 7.5, A = None, A = 1, A = 0.0, A = None, A = None, A = "pil", A = True, A = None, A = 1, **A, ):
'''simple docstring'''
return self.pipea(
prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, )
@torch.no_grad()
def UpperCamelCase_ ( self, A, A = 512, A = 512, A = 50, A = 7.5, A = None, A = 1, A = 0.0, A = None, A = None, A = "pil", A = True, A = None, A = 1, **A, ):
'''simple docstring'''
return self.pipea(
prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, )
@torch.no_grad()
def UpperCamelCase_ ( self, A, A = 512, A = 512, A = 50, A = 7.5, A = None, A = 1, A = 0.0, A = None, A = None, A = "pil", A = True, A = None, A = 1, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(A )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : List[Any] = self.textaimg_sda_a(
prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a(
prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : Union[str, Any] = self.textaimg_sda_a(
prompt=A, height=A, width=A, num_inference_steps=A, guidance_scale=A, negative_prompt=A, num_images_per_prompt=A, eta=A, generator=A, latents=A, output_type=A, return_dict=A, callback=A, callback_steps=A, **A, )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
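A hedged usage sketch for the comparison pipeline above: its constructor loads the v1-1, v1-2 and v1-3 checkpoints itself and assembles v1-4 from the components passed in, and the final method runs one prompt through all four. The names `ComparisonPipeline` and `compare_versions` below stand in for the mangled identifiers and are assumptions, not the file's API:

from diffusers import StableDiffusionPipeline

base = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe = ComparisonPipeline(  # hypothetical name for the class defined above
    base.vae, base.text_encoder, base.tokenizer, base.unet,
    base.scheduler, base.safety_checker, base.feature_extractor,
)
result = pipe.compare_versions("an astronaut riding a horse", num_inference_steps=50)
# result.images holds one image per checkpoint: v1-1, v1-2, v1-3, v1-4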
| 28
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {'''vocab_file''': '''vocab.txt'''}
snake_case = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
snake_case = {
'''openbmb/cpm-ant-10b''': 1_0_2_4,
}
def snake_case ( lowerCAmelCase_ ) -> int:
_snake_case = collections.OrderedDict()
with open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' ) as reader:
_snake_case = reader.readlines()
for index, token in enumerate(lowerCAmelCase_ ):
_snake_case = token.rstrip('''\n''' )
_snake_case = index
return vocab
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Tuple=2_0_0 ):
"""simple docstring"""
_snake_case = vocab
_snake_case = unk_token
_snake_case = max_input_chars_per_word
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str ):
"""simple docstring"""
_snake_case = list(__lowerCamelCase )
if len(__lowerCamelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
_snake_case = 0
_snake_case = []
while start < len(__lowerCamelCase ):
_snake_case = len(__lowerCamelCase )
_snake_case = None
while start < end:
_snake_case = ''''''.join(chars[start:end] )
if substr in self.vocab:
_snake_case = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__lowerCamelCase )
_snake_case = end
return sub_tokens
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : List[str] = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
A__ : Optional[int] = False
def __init__( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str="<d>" , __lowerCamelCase : Tuple="</d>" , __lowerCamelCase : Tuple="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : int="</n>" , __lowerCamelCase : Tuple="</_>" , __lowerCamelCase : Optional[Any]="left" , **__lowerCamelCase : str , ):
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=__lowerCamelCase , eod_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , unk_token=__lowerCamelCase , line_token=__lowerCamelCase , space_token=__lowerCamelCase , padding_side=__lowerCamelCase , **__lowerCamelCase , )
_snake_case = bod_token
_snake_case = eod_token
_snake_case = load_vocab(__lowerCamelCase )
_snake_case = self.encoder[space_token]
_snake_case = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __lowerCamelCase : x[1] ) )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return self.encoder["\n"]
@property
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
_snake_case = []
for x in jieba.cut(__lowerCamelCase , cut_all=__lowerCamelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__lowerCamelCase ) )
return output_tokens
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Optional[Any] ):
"""simple docstring"""
_snake_case = [i for i in token_ids if i >= 0]
_snake_case = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return token in self.encoder
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ):
"""simple docstring"""
return "".join(__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int ):
"""simple docstring"""
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ):
"""simple docstring"""
return self.decoder.get(__lowerCamelCase , self.unk_token )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(__lowerCamelCase ):
_snake_case = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
_snake_case = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
_snake_case = 0
if " " in self.encoder:
_snake_case = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
_snake_case = self.encoder['''\n''']
del self.encoder["\n"]
_snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __lowerCamelCase : x[1] ) )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
_snake_case = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase ))
return [1] + ([0] * len(__lowerCamelCase ))
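The inner WordpieceTokenizer above performs greedy longest-match segmentation: at each position it tries the longest substring present in the vocabulary, emits it, and continues; characters with no match become the unk token. A hedged, standalone sketch of the same algorithm with a toy vocabulary:

def longest_match_tokenize(text: str, vocab: set, unk: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        while end > start and text[start:end] not in vocab:
            end -= 1  # shrink the window until a vocabulary hit (or it empties)
        if end == start:  # nothing matched: emit unk and advance one character
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens

assert longest_match_tokenize("unhappy", {"un", "hap", "happy"}) == ["un", "happy"]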
| 103
| 0
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
_SCREAMING_SNAKE_CASE = pytest.mark.integration
_SCREAMING_SNAKE_CASE = {'comet'}
_SCREAMING_SNAKE_CASE = importlib.util.find_spec("fairseq") is not None
_SCREAMING_SNAKE_CASE = {'code_eval'}
_SCREAMING_SNAKE_CASE = os.name == 'nt'
_SCREAMING_SNAKE_CASE = {'bertscore', 'frugalscore', 'perplexity'}
_SCREAMING_SNAKE_CASE = importlib.util.find_spec("transformers") is not None
def _snake_case (_snake_case : str) -> str:
@wraps(_SCREAMING_SNAKE_CASE)
def wrapper(self : str , _snake_case : str):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('\"test requires Fairseq\"')
else:
test_case(self , _SCREAMING_SNAKE_CASE)
return wrapper
def _snake_case (_snake_case : Union[str, Any]) -> Optional[int]:
@wraps(_SCREAMING_SNAKE_CASE)
def wrapper(self : List[Any] , _snake_case : Tuple):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('\"test requires transformers\"')
else:
test_case(self , _SCREAMING_SNAKE_CASE)
return wrapper
def _snake_case (_snake_case : Optional[int]) -> str:
@wraps(_SCREAMING_SNAKE_CASE)
def wrapper(self : Optional[int] , _snake_case : Dict):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('\"test not supported on Windows\"')
else:
test_case(self , _SCREAMING_SNAKE_CASE)
return wrapper
def _snake_case () -> List[str]:
_lowercase =[metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
snake_case__ , snake_case__ , snake_case__ )
@local
class SCREAMING_SNAKE_CASE_ ( parameterized.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Tuple ={}
__lowerCAmelCase : Tuple =None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning')
def UpperCamelCase__ ( self :Any, snake_case :List[str]):
"""simple docstring"""
_lowercase ='[...]'
_lowercase =importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics', UpperCAmelCase_)).module_path)
_lowercase =datasets.load.import_main_class(metric_module.__name__, dataset=UpperCAmelCase_)
# check parameters
_lowercase =inspect.signature(metric._compute).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
# run doctest
with self.patch_intensive_calls(UpperCAmelCase_, metric_module.__name__):
with self.use_local_metrics():
try:
_lowercase =doctest.testmod(UpperCAmelCase_, verbose=UpperCAmelCase_, raise_on_error=UpperCAmelCase_)
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed, 0)
self.assertGreater(results.attempted, 1)
@slow
def UpperCamelCase__ ( self :Optional[int], snake_case :List[str]):
"""simple docstring"""
_lowercase ='[...]'
_lowercase =importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics', UpperCAmelCase_)).module_path)
# run doctest
with self.use_local_metrics():
_lowercase =doctest.testmod(UpperCAmelCase_, verbose=UpperCAmelCase_, raise_on_error=UpperCAmelCase_)
self.assertEqual(results.failed, 0)
self.assertGreater(results.attempted, 1)
@contextmanager
def UpperCamelCase__ ( self :Union[str, Any], snake_case :str, snake_case :Optional[int]):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCAmelCase_):
yield
else:
yield
@contextmanager
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
def load_local_metric(snake_case :Optional[Any], *snake_case :Optional[Any], **snake_case :str):
return load_metric(os.path.join('metrics', UpperCAmelCase_), *UpperCAmelCase_, **UpperCAmelCase_)
with patch('datasets.load_metric') as mock_load_metric:
_lowercase =load_local_metric
yield
@classmethod
def UpperCamelCase__ ( cls :Any, snake_case :Dict):
"""simple docstring"""
def wrapper(snake_case :Optional[int]):
_lowercase =contextmanager(UpperCAmelCase_)
_lowercase =patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt')
def _snake_case (_snake_case : List[str]) -> str:
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('sv' , '' , '') # handle pytest cli flags
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def UpperCamelCase__ ( self :Union[str, Any], snake_case :str):
"""simple docstring"""
assert len(input_dict['input_ids']) == 2
return np.array([1.0_3, 1.0_4])
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('bleurt.score._create_predictor') as mock_create_predictor:
_lowercase =MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore')
def _snake_case (_snake_case : Dict) -> Optional[Any]:
import torch
def bert_cos_score_idf(_snake_case : Any , _snake_case : Optional[int] , *_snake_case : Dict , **_snake_case : Optional[Any]):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE))
    # mock get_model, which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('bert_score.scorer.get_model'), patch(
'bert_score.scorer.bert_cos_score_idf') as mock_bert_cos_score_idf:
_lowercase =bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('comet')
def _snake_case (_snake_case : Union[str, Any]) -> Any:
def load_from_checkpoint(_snake_case : Dict):
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def UpperCamelCase__ ( self :Union[str, Any], snake_case :Union[str, Any], *snake_case :Optional[int], **snake_case :str):
"""simple docstring"""
assert len(UpperCAmelCase_) == 2
_lowercase =[0.1_9, 0.9_2]
return scores, sum(UpperCAmelCase_) / len(UpperCAmelCase_)
return Model()
    # mock load_from_checkpoint, which is supposed to download the comet model
with patch('comet.download_model') as mock_download_model:
_lowercase =None
with patch('comet.load_from_checkpoint') as mock_load_from_checkpoint:
_lowercase =load_from_checkpoint
yield
def _snake_case () -> List[str]:
_lowercase =load_metric(os.path.join('metrics' , 'seqeval'))
_lowercase ='ERROR'
_lowercase =f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE)):
metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE)
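Every patcher in this file follows the same pattern: swap an expensive model call inside a metric for a cheap stand-in via unittest.mock.patch, so the doctests can run offline. A minimal, self-contained illustration of the mechanism (patching a stdlib function instead of a model, purely for demonstration):

import math
from unittest.mock import patch

with patch("math.sqrt", return_value=3.0):
    assert math.sqrt(81) == 3.0  # the stub answers instead of the real call
assert math.sqrt(81) == 9.0  # the original function is restored on exit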
| 702
|
def solution(limit: int = 100_0000) -> int:
    # Sieve of Euler's totient: phi[i] ends up equal to phi(i) for 2 <= i <= limit
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # still untouched, so i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
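The sieve above fills phi[i] with Euler's totient of i, and the sum over 2..limit counts the reduced proper fractions with denominator at most limit (Project Euler 72). A hedged brute-force cross-check for a small limit:

from math import gcd

def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

assert sum(phi_naive(d) for d in range(2, 9)) == 21  # 21 reduced fractions for d <= 8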
| 557
| 0
|
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class A_(SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"""be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , SCREAMING_SNAKE_CASE_ , )
| 437
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def UpperCAmelCase_ ( __a : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : int = DPTConfig()
if "large" in checkpoint_url:
_lowerCamelCase : Optional[Any] = 10_24
_lowerCamelCase : List[str] = 40_96
_lowerCamelCase : Union[str, Any] = 24
_lowerCamelCase : Any = 16
_lowerCamelCase : Union[str, Any] = [5, 11, 17, 23]
_lowerCamelCase : Optional[int] = [2_56, 5_12, 10_24, 10_24]
_lowerCamelCase : List[str] = (1, 3_84, 3_84)
if "ade" in checkpoint_url:
_lowerCamelCase : Any = True
_lowerCamelCase : List[str] = 1_50
_lowerCamelCase : int = 'huggingface/label-files'
_lowerCamelCase : Union[str, Any] = 'ade20k-id2label.json'
_lowerCamelCase : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__a , __a , repo_type='dataset' ) ) , 'r' ) )
_lowerCamelCase : Optional[Any] = {int(__a ): v for k, v in idalabel.items()}
_lowerCamelCase : int = idalabel
_lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def UpperCAmelCase_ ( __a : Tuple ):
'''simple docstring'''
_lowerCamelCase : int = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(__a , __a )
def UpperCAmelCase_ ( __a : Dict ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowerCamelCase : Optional[Any] = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowerCamelCase : Optional[int] = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowerCamelCase : Any = name.replace('patch_embed' , 'patch_embeddings' )
if "pos_embed" in name:
_lowerCamelCase : str = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowerCamelCase : int = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowerCamelCase : Tuple = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowerCamelCase : Optional[int] = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowerCamelCase : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowerCamelCase : Optional[int] = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_lowerCamelCase : str = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowerCamelCase : Optional[int] = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowerCamelCase : Dict = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowerCamelCase : Tuple = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowerCamelCase : Tuple = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowerCamelCase : Tuple = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowerCamelCase : List[Any] = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowerCamelCase : str = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowerCamelCase : Union[str, Any] = name.replace(f"refinenet{layer_idx}" , f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
_lowerCamelCase : Optional[Any] = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowerCamelCase : str = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowerCamelCase : List[str] = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowerCamelCase : List[Any] = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowerCamelCase : Optional[Any] = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowerCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowerCamelCase : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowerCamelCase : List[str] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowerCamelCase : Dict = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowerCamelCase : Optional[int] = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowerCamelCase : List[str] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowerCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowerCamelCase : str = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowerCamelCase : List[Any] = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowerCamelCase : List[str] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowerCamelCase : Tuple = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowerCamelCase : Any = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowerCamelCase : Tuple = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowerCamelCase : str = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowerCamelCase : Union[str, Any] = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowerCamelCase : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head' )
return name
def UpperCAmelCase_ ( __a : Tuple , __a : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Any = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
_lowerCamelCase : Dict = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: config.hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : str = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCamelCase : Dict = Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ ( __a : Optional[int] , __a : int , __a : str , __a : List[str] ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : List[str] = get_dpt_config(__a )
# load original state_dict from URL
_lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(__a , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(__a )
# rename keys
for key in state_dict.copy().keys():
_lowerCamelCase : str = state_dict.pop(__a )
_lowerCamelCase : str = val
# read in qkv matrices
read_in_q_k_v(__a , __a )
# load HuggingFace model
_lowerCamelCase : List[str] = DPTForSemanticSegmentation(__a ) if 'ade' in checkpoint_url else DPTForDepthEstimation(__a )
model.load_state_dict(__a )
model.eval()
# Check outputs on an image
_lowerCamelCase : str = 4_80 if 'ade' in checkpoint_url else 3_84
_lowerCamelCase : Optional[Any] = DPTImageProcessor(size=__a )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : str = image_processor(__a , return_tensors='pt' )
# forward pass
_lowerCamelCase : Tuple = model(**__a ).logits if 'ade' in checkpoint_url else model(**__a ).predicted_depth
# Assert logits
_lowerCamelCase : Dict = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] )
if "ade" in checkpoint_url:
_lowerCamelCase : List[Any] = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] )
assert outputs.shape == torch.Size(__a )
assert (
torch.allclose(outputs[0, 0, :3, :3] , __a , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , __a )
)
Path(__a ).mkdir(exist_ok=__a )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__a )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__a )
if push_to_hub:
print('Pushing model to hub...' )
model.push_to_hub(
repo_path_or_name=Path(__a , __a ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__a , )
image_processor.push_to_hub(
repo_path_or_name=Path(__a , __a ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__a , )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
a_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
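Once converted (or using the released Hub weights), the checkpoint works like any other DPT model in transformers. A hedged depth-estimation sketch; the model id Intel/dpt-large is an assumption here, and any converted local folder can be substituted:

import torch
import requests
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # (1, H, W) relative depth map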
| 437
| 1
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 714
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('The parameter s type must be str.' )
return [s[i:] + s[:i] for i in range(len(lowerCamelCase_ ) )]
def UpperCAmelCase__ ( lowerCamelCase_ : str ):
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('The parameter s type must be str.' )
if not s:
raise ValueError('The parameter s must not be empty.' )
__a : List[Any] = all_rotations(lowerCamelCase_ )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
__a : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(lowerCamelCase_ ),
}
return response
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : int ):
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('The parameter bwt_string type must be str.' )
if not bwt_string:
raise ValueError('The parameter bwt_string must not be empty.' )
try:
__a : Tuple = int(lowerCamelCase_ )
except ValueError:
raise TypeError(
'The parameter idx_original_string type must be int or passive'
' of cast to int.' )
if idx_original_string < 0:
raise ValueError('The parameter idx_original_string must not be lower than 0.' )
if idx_original_string >= len(lowerCamelCase_ ):
raise ValueError(
'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
__a : str = [''] * len(lowerCamelCase_ )
for _ in range(len(lowerCamelCase_ ) ):
for i in range(len(lowerCamelCase_ ) ):
__a : List[str] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = '''Provide a string that I will generate its BWT transform: '''
SCREAMING_SNAKE_CASE__ = input(entry_msg).strip()
SCREAMING_SNAKE_CASE__ = bwt_transform(s)
print(
F"Burrows Wheeler transform for string '{s}' results "
F"in '{result['bwt_string']}'"
)
SCREAMING_SNAKE_CASE__ = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
F"we get original string '{original_string}'"
)
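A worked example of the transform above, restated with clean names since the mangled functions all share one identifier (the helper names here are ours):

def all_rotations(s: str) -> list:
    return [s[i:] + s[:i] for i in range(len(s))]

def bwt_transform(s: str) -> tuple:
    rotations = sorted(all_rotations(s))
    # last column of the sorted rotation matrix, plus the original string's row index
    return "".join(word[-1] for word in rotations), rotations.index(s)

assert bwt_transform("banana") == ("nnbaaa", 3)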
| 577
| 0
|