This dump flattens a code-style classification dataset with five columns:

| column | type | range |
|---|---|---|
| code | string | 82 to 53.2k chars |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | 91 to 41.9k chars |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 or 1 |

Each row holds two code samples (`code` and `style_context`) plus their style ids and a binary label; the bare `| N |` lines between the samples below are the surviving integer cells of each row.
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated --no_* arguments into their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
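A minimal usage sketch of the dataclass above (hedged: it assumes the standard `BenchmarkArguments` fields `models`, `batch_sizes` and `sequence_lengths`, which live in the parent class, not in this snippet):

```python
# hypothetical quick check, not part of the original file
args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32]
)
print(args.device, args.n_gpu)  # e.g. "cuda 1" on a single-GPU box, "cpu 0" otherwise
```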
| 360 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector = v . v"""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier"""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            raise ValueError(f"Unknown kernel: {kernel}")

    # kernels
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel is used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the dual objective, so that `minimize` maximizes it."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Get the expected class of an observation."""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
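A minimal fit/predict sketch for the classifier above, using the kind of toy data its doctests are written for (two linearly separable classes, illustrative only):

```python
import numpy as np

# points with x == 0 are class +1, points with x == 1 are class -1
xs = [np.asarray((0, 1)), np.asarray((0, 2)), np.asarray((1, 1)), np.asarray((1, 2))]
ys = np.asarray((1, 1, -1, -1))
svc = SVC(kernel="linear")
svc.fit(xs, ys)
print(svc.predict(np.asarray((0, 1.5))))  # expected: 1
print(svc.predict(np.asarray((1, 1.5))))  # expected: -1
```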
| 360 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 706 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: shift each letter back by the key letter (mod 26)."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt: shift each letter forward by the key letter (mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
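A hand-checked round trip on a toy input: with message "AB" and key "B", generate_key pads the key to "BB"; encryption maps A to (0 - 1) mod 26 = 25 (Z) and B to 0 (A), and decryption inverts it:

```python
key = generate_key("AB", "B")            # "BB"
assert cipher_text("AB", key) == "ZA"
assert original_text("ZA", key) == "AB"
```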
| 580 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
    from fairscale.optim import OSS


logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 519 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 512 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class a ( unittest.TestCase ):
def UpperCamelCase ( self : List[Any] ) -> Dict:
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = BlipImageProcessor()
lowerCamelCase_ = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
lowerCamelCase_ = BlipaProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self : List[Any] , **__SCREAMING_SNAKE_CASE : int ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).tokenizer
def UpperCamelCase ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).image_processor
def UpperCamelCase ( self : str ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self : Any ) -> Any:
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self : Any ) -> Optional[Any]:
lowerCamelCase_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowerCamelCase_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='np' )
lowerCamelCase_ = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self : int ) -> Tuple:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = processor(text=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer(__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self : List[str] ) -> Optional[Any]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def UpperCamelCase ( self : Dict ) -> int:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[Any] ) -> str:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 137 |
"""simple docstring"""
import math
def lowerCamelCase__ ( _lowerCamelCase : int ) -> bool:
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
lowerCamelCase_ = range(3 , int(math.sqrt(_lowerCamelCase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def lowerCamelCase__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=1 , **_lowerCamelCase : int ) -> str:
lowerCamelCase_ = factor * value
lowerCamelCase_ = value
while not is_prime(_lowerCamelCase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **_lowerCamelCase )
return value
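A few hand-checked calls for the two helpers above:

```python
assert is_prime(13) is True
assert is_prime(15) is False   # 15 = 3 * 5
assert next_prime(14) == 17    # 14, 15, 16 are composite; 17 is prime
```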
| 137 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : Tuple = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_a : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 213 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000 ) -> int:
_lowerCAmelCase : Optional[int] = 2**power
_lowerCAmelCase : str = str(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = list(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = 0
for i in list_num:
sum_of_num += int(_lowerCamelCase )
return sum_of_num
if __name__ == "__main__":
_a : str = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
_a : Tuple = solution(power)
print('Sum of the digits is: ', result)
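Two hand-checkable values for solution: 2**15 = 32768, whose digits sum to 26, and the power-1000 case is the published Project Euler 16 answer:

```python
assert solution(15) == 26      # 3 + 2 + 7 + 6 + 8
assert solution(1000) == 1366  # Project Euler 16
```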
| 213 | 1 |
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the reflection point
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count how many times the beam hits the ellipse wall before escaping."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    incoming_gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, incoming_gradient = next_point(
            point_x, point_y, incoming_gradient
        )
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 717 |
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
| 277 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
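For reference, the published Project Euler 30 result: the qualifying numbers are 4150, 4151, 54748, 92727, 93084 and 194979, so:

```python
assert solution() == 443839  # sum of the six numbers above
```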
| 16 |
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian probability density with mean mu and std sigma at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
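A quick hand check: at the mean of a standard normal, the density is 1/sqrt(2*pi):

```python
print(gaussian(0))  # ~0.3989422804014327
```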
| 55 | 0 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort the list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
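A quick check of the sort (the double loop makes it O(n^2), like selection sort):

```python
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-1, 0, 7]) == [-1, 0, 7]
```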
| 717 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 53 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample

    def set_timesteps(self, num_inference_steps: int, device=None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output,
        timestep,
        sample,
        prev_timestep=None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples,
        noise,
        timesteps,
    ):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 71 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model: it also needs next-sentence labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
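
# Standalone sketch of the relative-tolerance check used above: divide the expected
# values by the actual ones and require the ratio to stay within 1 +/- tol. This is a
# hypothetical helper for illustration, not part of the test suite.
def _within_relative_tolerance(expected, actual, tol=1e-3):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))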
| 140
| 0
|
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes all edge weights distinct by bumping duplicates in sorted order."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from a list of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    """Disjoint-set structure with union by rank and path compression."""

    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        root1 = self.find(item1)
        root2 = self.find(item2)

        if root1 == root2:
            return root1

        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1

        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2

        # equal ranks: pick root1 as the new root and bump its rank
        self.rank[root1] += 1
        self.parent[root2] = root1
        return root1


def boruvka_mst(graph):
    """Computes a minimum spanning tree of `graph` with Boruvka's algorithm."""
    num_components = graph.num_vertices

    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1

        edges = graph.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))  # drop the reversed duplicate of each edge
        for edge in edges:
            head, tail, weight = edge
            set1 = union_find.find(head)
            set2 = union_find.find(tail)
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]
                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]

        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head) != union_find.find(tail):
                    union_find.union(head, tail)
                    mst_edges.append(cheap_edge[vertex])
                    num_components = num_components - 1

    mst = Graph.build(edges=mst_edges)
    return mst
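
# A minimal usage sketch (not part of the original module): build a small graph,
# make its weights distinct, and compute its MST with the Boruvka routine above.
if __name__ == "__main__":
    g = Graph.build(edges=[("a", "b", 1), ("b", "c", 2), ("a", "c", 3)])
    g.distinct_weight()
    mst = boruvka_mst(g)
    print(mst)  # two edges of total minimum weight span the three vertices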
| 699
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
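
# Hypothetical, simplified sketch of the lazy-import pattern used above: attribute
# access on the module triggers the real submodule import, so importing the package
# stays cheap. `SketchLazyModule` is an illustration, not the actual `_LazyModule`.
import importlib
import types


class SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)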
| 699
| 1
|
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)
    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 15
|
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    r"""
    The sample tree:
        1
       / \
      2   3
     / \
    4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, left subtree, right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> list[int]:
    """Breadth-first traversal of the whole tree."""
    output: list[int] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[int]:
    """Returns the values at the given level, left to right."""
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[int]:
    """Returns the values at the given level, right to left."""
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[list[int]]:
    """Zigzag (spiral) level-order traversal: alternate direction per level."""
    if root is None:
        return []

    output: list[list[int]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
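
# Quick sanity checks (illustrative) against the sample tree built by `make_tree`.
if __name__ == "__main__":
    sample = make_tree()
    assert preorder(sample) == [1, 2, 4, 5, 3]
    assert inorder(sample) == [4, 2, 5, 1, 3]
    assert postorder(sample) == [4, 5, 2, 3, 1]
    assert zigzag(sample) == [[1], [3, 2], [4, 5]]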
| 15
|
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
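
# Usage sketch for the helpers above (illustrative checks, verified by hand).
if __name__ == "__main__":
    assert is_prime(13) and not is_prime(14)
    assert next_prime(14) == 17  # scan upward from 14: 15, 16 are composite, 17 is prime
    assert next_prime(13) == 17  # a prime input is skipped, so the *next* prime is returned
    assert next_prime(10, factor=2) == 23  # the scan starts at factor * value = 20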
| 15
| 1
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        # All args of the model's forward signature are contiguous
        forward_parameters = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, forward_parameters)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(forward_parameters))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, forward_parameters)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
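
# Hedged usage sketch for the helpers under test; the checkpoint name and output path
# are assumptions for illustration, and the positional argument order of `convert`
# mirrors how `_test_export` above calls it.
if __name__ == "__main__":
    output = Path("onnx/bert-base-cased.onnx")
    convert("pt", "bert-base-cased", output, 12, None)  # framework, model, output, opset, tokenizer
    quantized = quantize(output)  # writes a quantized copy next to the original
    print(generate_identified_filename(output, "-test"))  # onnx/bert-base-cased-test.onnx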
| 481
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        """Constructs LukeConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
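
# Minimal usage sketch: instantiate a deliberately tiny LukeConfig (the values below
# are illustrative overrides, not the defaults).
if __name__ == "__main__":
    config = LukeConfig(vocab_size=1000, entity_vocab_size=100, hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
    print(config.model_type)  # "luke"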
| 698
| 0
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
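
# Self-contained sketch of the torch.jit trace/save/load round trip exercised in
# `test_torchscript_device_change`, on a toy module instead of DistilBERT.
def _trace_round_trip_demo():
    toy = torch.nn.Linear(4, 2).eval()
    example = torch.randn(1, 4)
    traced = torch.jit.trace(toy, example)
    with tempfile.TemporaryDirectory() as tmp:
        torch.jit.save(traced, os.path.join(tmp, "traced.pt"))
        loaded = torch.jit.load(os.path.join(tmp, "traced.pt"), map_location="cpu")
    assert torch.allclose(loaded(example), toy(example))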
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 586
| 0
|
def combination_util(arr, n, r, index, data, i):
    """Recursively prints all combinations of size r from arr[0..n-1].

    `index` is the current position in `data`; `i` is the current position in `arr`.
    """
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
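
# Sanity-check sketch: the recursion above enumerates exactly the C(5, 3) = 10
# size-3 subsets that itertools.combinations yields, in the same lexicographic order.
if __name__ == "__main__":
    from itertools import combinations

    assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10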
| 184
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
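# Example invocation (the script name and all paths below are placeholders):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin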
| 184
| 1
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
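# Illustrative sketch (hypothetical sizes): the original checkpoint fuses the
# query/key/value projections into one "qkv" matrix of shape (3 * dim, dim);
# the slices val[:dim], val[dim : dim * 2] and val[-dim:] above recover the
# three separate projections. This helper is not called by the script.
def _demo_qkv_split():
    dim = 4  # hypothetical hidden size
    qkv = torch.zeros(3 * dim, dim)
    query, key, value = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)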
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
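# Example invocation (the script name, checkpoint URL and output path below
# are placeholders):
#
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "https://drive.google.com/..." \
#       --pytorch_dump_folder_path ./videomae-base \
#       --model_name videomae-base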
| 717
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 70
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 81
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
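# Hypothetical usage sketch (the data directory and checkpoint name are
# placeholders):
#
#     from transformers import AutoTokenizer
#
#     data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = SquadDataset(data_args, tokenizer, mode="train")
#     batch = train_dataset[0]  # dict of input_ids / attention_mask / ...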
| 106
| 0
|
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1

        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)

        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1

        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2

        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 711
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 15
| 0
|
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(F"""{solution() = }""")
| 375
|
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 375
| 1
|
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
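# For example, multiplication_table(number=5, number_of_terms=3) returns the
# string "5 * 1 = 5\n5 * 2 = 10\n5 * 3 = 15".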
| 713
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 656
| 0
|
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
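    # Worked example: longest_common_substring("abcdef", "zcdemf") returns
    # "cde", the longest run of characters shared by both strings.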
| 601
|
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 601
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
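# Illustrative check (the values follow from the defaults above): with
# num_layers=12 and num_sparse_encoder_layers=3, every 4th layer on each side
# is a sparse (mixture-of-experts) layer.
#
#     config = SwitchTransformersConfig()
#     assert config.encoder_sparse_step == 4
#     assert config.decoder_sparse_step == 4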
| 717
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowercase__ =parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
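# Example invocation (the script name and dump path below are placeholders):
#
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base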
| 326
| 0
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class lowerCamelCase__ ( UpperCAmelCase ):
UpperCamelCase__ ="conditional_detr"
UpperCamelCase__ =["past_key_values"]
UpperCamelCase__ ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        return self.d_model
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        return 12
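if __name__ == "__main__":
    # Illustrative usage (not part of the original module): build a default
    # config and show that `hidden_size` is an alias of `d_model` via attribute_map.
    config = ConditionalDetrConfig()
    print(config.hidden_size, config.d_model)  # 256 256
    print(config.to_dict()["model_type"])  # conditional_detr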
| 192
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.0_0_2,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained('google/umt5-base')
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask)
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fpaa_forward(
        self,
        config,
        input_dict,
    ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'{tmpdirname}/t5_test.onnx', export_params=True, opset_version=9, input_names=['input_ids', 'decoder_input_ids'], )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def test_model_fpaa_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)
    def test_headmasking(self):
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            'head_mask': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]['input_ids'], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small', use_fast=False, legacy=False)
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text, return_tensors='pt', padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 192
| 1
|
'''simple docstring'''
def molarity_to_normality(nfactor, moles, volume) -> float:
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume, moles, temperature) -> float:
    return round(float((moles * 0.0_821 * temperature) / (volume)))
def moles_to_volume(pressure, moles, temperature) -> float:
    return round(float((moles * 0.0_821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure, moles, volume) -> float:
    return round(float((pressure * volume) / (0.0_821 * moles)))
def _snake_case ( A , A , A ) -> float:
return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
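    # Illustrative check (not part of the original file): one mole of an ideal
    # gas at 300 K in a 24.6 L vessel exerts about 1 atm once rounded.
    print(moles_to_pressure(volume=24.6, moles=1, temperature=300))  # -> 1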
| 715
|
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
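# Worked example: for input_string = "aab" and pattern = "c*a*b", "c*" matches
# the empty string, "a*" matches "aa", and "b" matches "b", so dp[-1][-1] ends
# up 1 and match_pattern("aab", "c*a*b") returns True.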
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
input_string = '''aab'''
pattern = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 98
| 0
|
'''simple docstring'''
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    '''simple docstring'''
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
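    # Illustrative example (not part of the original file): 100 V at 0 degrees
    # and 5 A at -30 degrees give a complex apparent power of 500 VA at -30
    # degrees, i.e. roughly (433 - 250j) volt-amperes.
    print(apparent_power(100, 5, 0, -30))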
| 531
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
            '<unk>',
            '[CLS]',
            '[SEP]',
            'want',
            'unwanted',
            'wa',
            'un',
            'running',
            ',',
            'low',
            'l',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize('<unk> UNwanted , running')
        self.assertListEqual(tokens, ['<unk>', 'unwanted', ',', 'running'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ?  '), ['hello', '!', 'how', 'are', 'you', '?'])
    def test_full_tokenizer_no_lower(self):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ?  '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_full_tokenizer_moses(self):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        # TransfoXL follows the WikiText convention of escaping in-token
        # punctuation: "@-@" for hyphens and "@,@"/"@.@" inside numbers;
        # convert_tokens_to_string reverses this escaping.
        tokens_out = [
            'Hello',
            '(',
            'bracket',
            ')',
            'and',
            'side',
            '@-@',
            'scrolled',
            '[',
            'and',
            ']',
            'Henry',
            '\'s',
            '$',
            '5',
            '@,@',
            '000',
            'with',
            '3',
            '@.@',
            '34',
            'm',
            '.',
            'What',
            '\'s',
            'up',
            '!',
            '?',
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(['new1', 'new2'])
        tokenizer.move_added_token('new1', 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('new1'), [1])
        self.assertEqual(tokenizer.decode([1]), 'new1')
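# To run only this module (illustrative; the test-file path is an assumption):
#   python -m pytest tests/models/transfo_xl/test_tokenization_transfo_xl.py -q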
| 70
| 0
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest")
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype)
    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
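if __name__ == "__main__":
    # Illustrative smoke test (not part of the original module): run the ResNet
    # block once on a random NHWC feature map with a dummy time embedding.
    block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
    x = jnp.ones((1, 8, 8, 32))
    temb = jnp.ones((1, 128))
    params = block.init(jax.random.PRNGKey(0), x, temb)
    out = block.apply(params, x, temb)
    print(out.shape)  # (1, 8, 8, 32)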
| 364
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
def UpperCAmelCase_ ( self :str ) -> int:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCamelCase ) == len(lowerCamelCase ) for x, y in zip(lowerCamelCase , processed_features[input_name] ) ) )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def UpperCAmelCase_ ( self :Dict ) -> Union[str, Any]:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def UpperCAmelCase_ ( self :Tuple ) -> Dict:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding(self, numpify=False):
def _inputs_have_equal_length(lowerCamelCase :Union[str, Any] ):
UpperCAmelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase :Dict , lowerCamelCase :Optional[Any] ):
if len(lowerCamelCase ) != len(lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(lowerCamelCase , lowerCamelCase ):
if not np.allclose(np.asarray(lowerCamelCase ) , np.asarray(lowerCamelCase ) , atol=1e-3 ):
return False
return True
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCamelCase )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = self.feat_extract_tester.seq_length_diff
UpperCAmelCase__ = self.feat_extract_tester.max_seq_length + pad_diff
UpperCAmelCase__ = self.feat_extract_tester.min_seq_length
UpperCAmelCase__ = self.feat_extract_tester.batch_size
UpperCAmelCase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[-1] ) )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="max_length" )[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , pad_to_multiple_of=10 )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , pad_to_multiple_of=10 )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=lowerCamelCase , return_tensors="np" , )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(all(len(lowerCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
UpperCAmelCase__ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
    def _check_truncation(self, numpify=False):
def _inputs_have_equal_length(lowerCamelCase :Any ):
UpperCAmelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase :Optional[int] , lowerCamelCase :str ):
if len(lowerCamelCase ) != len(lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(lowerCamelCase , lowerCamelCase ):
if not np.allclose(np.asarray(lowerCamelCase ) , np.asarray(lowerCamelCase ) , atol=1e-3 ):
return False
return True
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCamelCase )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
# truncate to smallest with np
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
# truncate to middle
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase , return_tensors="np" , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , truncation=lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="longest" , truncation=lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="longest" , truncation=lowerCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="max_length" , truncation=lowerCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase__ = 12
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
UpperCAmelCase__ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
UpperCAmelCase__ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
    def test_padding_from_list(self):
        self._check_padding(numpify=False)
    def test_padding_from_array(self):
        self._check_padding(numpify=True)
    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)
    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
def UpperCAmelCase_ ( self :int ) -> Any:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def UpperCAmelCase_ ( self :List[Any] ) -> Optional[Any]:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="tf" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def UpperCAmelCase_ ( self :List[str] ) -> str:
UpperCAmelCase__ = self.feat_extract_dict
UpperCAmelCase__ = True
UpperCAmelCase__ = self.feature_extraction_class(**lowerCamelCase )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = [len(lowerCamelCase ) for x in speech_inputs]
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCamelCase )
def UpperCAmelCase_ ( self :int ) -> int:
UpperCAmelCase__ = self.feat_extract_dict
UpperCAmelCase__ = True
UpperCAmelCase__ = self.feature_extraction_class(**lowerCamelCase )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = [len(lowerCamelCase ) for x in speech_inputs]
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = min(lowerCamelCase )
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 364
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
'''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit') else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
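# Example: rename_key({'a': 1}, 'a', 'b') mutates the dict in place to {'b': 1}.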
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    '''simple docstring'''
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 10_00
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith('tiny'):
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small'):
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base'):
        pass
    elif deit_name[4:].startswith('large'):
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (2_56 / 2_24) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
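    # Example invocation (illustrative; the script filename is an assumption):
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224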
| 85
|
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    """simple docstring"""
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]
        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])
        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
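# Note (illustrative): the alignment helper fills in whichever of out_features /
# out_indices is missing, e.g. stage_names=["a", "b", "c"] with out_indices=[0, 2]
# yields out_features=["a", "c"].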
| 104
| 0
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('''https://huggingface.co''' )
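# Sketch (illustrative): the same simulation modes can wrap any network-touching
# code in a test, e.g.
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       ...  # any HTTP request issued here fails fast with ConnectionError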
| 713
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : Optional[int] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"
    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        retriever_proj_size=1_28,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=30_72,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        span_hidden_size=2_56,
        max_span_width=10,
        reader_layer_norm_eps=1E-3,
        reader_beam_size=5,
        reader_seq_len=3_20,
        num_block_records=13_35_37_18,
        searcher_beam_size=50_00,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
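if __name__ == "__main__":
    # Illustrative usage (not part of the original module): default REALM config.
    config = RealmConfig()
    print(config.model_type, config.hidden_size, config.num_candidates)  # realm 768 8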
| 397
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__lowercase : str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True})
__lowercase : ClassVar[Features] = Features({'''question''': Value('''string'''), '''context''': Value('''string''')})
__lowercase : ClassVar[Features] = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string'''),
'''answer_start''': Value('''int32'''),
})
})
__lowercase : str = "question"
__lowercase : str = "context"
__lowercase : str = "answers"
@property
def lowerCAmelCase ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 24
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_UpperCAmelCase : Tuple = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683
| 0
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = """owlvit_text_model"""
def __init__( self , A_=4_9408 , A_=512 , A_=2048 , A_=12 , A_=8 , A_=16 , A_="quick_gelu" , A_=1e-5 , A_=0.0 , A_=0.02 , A_=1.0 , A_=0 , A_=4_9406 , A_=4_9407 , **A_ , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
__lowerCAmelCase : Tuple = vocab_size
__lowerCAmelCase : List[str] = hidden_size
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Dict = num_attention_heads
__lowerCAmelCase : int = max_position_embeddings
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : Optional[int] = layer_norm_eps
__lowerCAmelCase : List[str] = attention_dropout
__lowerCAmelCase : Optional[int] = initializer_range
__lowerCAmelCase : Dict = initializer_factor
@classmethod
def UpperCamelCase__ ( cls , A_ , **A_ ) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(A_ )
__lowerCAmelCase, __lowerCAmelCase : List[Any] = cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowerCAmelCase : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(A_ , **A_ )
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = """owlvit_vision_model"""
def __init__( self , A_=768 , A_=3072 , A_=12 , A_=12 , A_=3 , A_=768 , A_=32 , A_="quick_gelu" , A_=1e-5 , A_=0.0 , A_=0.02 , A_=1.0 , **A_ , ) ->int:
'''simple docstring'''
super().__init__(**A_ )
__lowerCAmelCase : int = hidden_size
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : List[str] = num_channels
__lowerCAmelCase : Dict = image_size
__lowerCAmelCase : Dict = patch_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : Optional[int] = attention_dropout
__lowerCAmelCase : Dict = initializer_range
__lowerCAmelCase : Optional[int] = initializer_factor
@classmethod
def UpperCamelCase__ ( cls , A_ , **A_ ) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(A_ )
__lowerCAmelCase, __lowerCAmelCase : str = cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__lowerCAmelCase : Tuple = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(A_ , **A_ )
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = """owlvit"""
_UpperCamelCase = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6_592 , A_=True , **A_ , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
if text_config is None:
__lowerCAmelCase : int = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
__lowerCAmelCase : str = {}
logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''' )
__lowerCAmelCase : Tuple = OwlViTTextConfig(**A_ )
__lowerCAmelCase : Union[str, Any] = OwlViTVisionConfig(**A_ )
__lowerCAmelCase : Optional[Any] = projection_dim
__lowerCAmelCase : List[Any] = logit_scale_init_value
__lowerCAmelCase : List[str] = return_dict
__lowerCAmelCase : Optional[Any] = 1.0
@classmethod
def UpperCamelCase__ ( cls , A_ , **A_ ) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(A_ )
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = cls.get_config_dict(A_ , **A_ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(A_ , **A_ )
@classmethod
def UpperCamelCase__ ( cls , A_ , A_ , **A_ ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = {}
__lowerCAmelCase : int = text_config
__lowerCAmelCase : List[Any] = vision_config
return cls.from_dict(A_ , **A_ )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
__lowerCAmelCase : List[Any] = self.text_config.to_dict()
__lowerCAmelCase : Optional[Any] = self.vision_config.to_dict()
__lowerCAmelCase : List[Any] = self.__class__.model_type
return output
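# Hedged sketch (method and attribute names taken from the upstream OwlViT API,
# where the sub-configs are stored on `self`): a composite config can be assembled
# from plain dicts via the classmethod above, and `to_dict` round-trips it:
#   cfg = OwlViTConfig.from_text_vision_configs(
#       {"vocab_size": 49408}, {"patch_size": 32})   # hypothetical values
#   cfg.to_dict()["text_config"]["vocab_size"]       # 49408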
class __lowercase (_UpperCAmelCase ):
@property
def UpperCamelCase__ ( self ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def UpperCamelCase__ ( self ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def UpperCamelCase__ ( self ) ->float:
'''simple docstring'''
return 1e-4
def UpperCamelCase__ ( self , A_ , A_ = -1 , A_ = -1 , A_ = None , ) ->Mapping[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=A_ , seq_length=A_ , framework=A_ )
__lowerCAmelCase : Union[str, Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=A_ , framework=A_ )
return {**text_input_dict, **image_input_dict}
@property
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
return 14
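# Hedged sketch of how this ONNX config is typically consumed (the constructor
# call and variable names below are assumptions):
#   onnx_config = OwlViTOnnxConfig(config)
#   dummy = onnx_config.generate_dummy_inputs(processor, batch_size=1, seq_length=8)
#   # `dummy` merges the tokenizer inputs ("input_ids", "attention_mask") with the
#   # image processor's "pixel_values", matching the `inputs` mapping above.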
| 583
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 583
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''ViTFeatureExtractor''']
lowerCAmelCase_ = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
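# Illustrative note (a sketch of the pattern, not part of the original file):
# `_LazyModule` installs a proxy module whose `__getattr__` resolves names from
# `_import_structure` on first access, so framework-heavy imports are deferred:
#   from transformers import ViTConfig   # cheap: no torch/TF/flax import yet
#   from transformers import ViTModel    # first access pulls in the torch backend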
| 60
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(_a : str ):
    with open(_a , "rb" ) as f:
        im = Image.open(f )
        return im.convert("RGB" )
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
A : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A folder containing the training data.'} )
A : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A folder containing the validation data.'} )
A : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
A : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
A : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def _lowerCAmelCase ( self ) -> Tuple:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
A : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(SCREAMING_SNAKE_CASE__ )} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
A : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
A : str = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'} )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn(examples ):
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_ , snake_case_ , snake_case_ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_ , snake_case_ , snake_case_ : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , _a , _a )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ : Tuple = training_args.get_process_log_level()
logger.setLevel(_a )
transformers.utils.logging.set_verbosity(_a )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
snake_case_ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
snake_case_ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
snake_case_ : Dict = {}
if data_args.train_dir is not None:
snake_case_ : int = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
snake_case_ : List[str] = os.path.join(data_args.validation_dir , "**" )
snake_case_ : int = load_dataset(
"imagefolder" , data_files=_a , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
snake_case_ : Optional[int] = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _a ) and data_args.train_val_split > 0.0:
snake_case_ : Union[str, Any] = dataset["train"].train_test_split(data_args.train_val_split )
snake_case_ : str = split["train"]
snake_case_ : Optional[int] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
snake_case_ : Union[str, Any] = dataset["train"].features["labels"].names
snake_case_ , snake_case_ : Optional[Any] = {}, {}
for i, label in enumerate(_a ):
snake_case_ : Optional[int] = str(_a )
snake_case_ : Optional[int] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
snake_case_ : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_a ) , labelaid=_a , idalabel=_a , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
snake_case_ : Dict = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
snake_case_ : Optional[Any] = image_processor.size["shortest_edge"]
else:
snake_case_ : str = (image_processor.size["height"], image_processor.size["width"])
snake_case_ : Optional[Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
snake_case_ : Union[str, Any] = Compose(
[
RandomResizedCrop(_a ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
snake_case_ : List[Any] = Compose(
[
Resize(_a ),
CenterCrop(_a ),
ToTensor(),
normalize,
] )
def train_transforms(_a : Optional[int] ):
snake_case_ : List[str] = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(_a : List[Any] ):
snake_case_ : int = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
snake_case_ : str = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_a )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
snake_case_ : List[str] = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_a )
# Initialize our trainer
snake_case_ : Optional[Any] = Trainer(
model=_a , args=_a , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=_a , tokenizer=_a , data_collator=_a , )
# Training
if training_args.do_train:
snake_case_ : Tuple = None
if training_args.resume_from_checkpoint is not None:
snake_case_ : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ : str = last_checkpoint
snake_case_ : Tuple = trainer.train(resume_from_checkpoint=_a )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case_ : Union[str, Any] = trainer.evaluate()
trainer.log_metrics("eval" , _a )
trainer.save_metrics("eval" , _a )
# Write model card and (optionally) push to hub
snake_case_ : Union[str, Any] = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_a )
else:
trainer.create_model_card(**_a )
if __name__ == "__main__":
main()
| 568
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = 'trocr'
_lowercase = ['past_key_values']
_lowercase = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self , __UpperCAmelCase=50_265 , __UpperCAmelCase=1_024 , __UpperCAmelCase=12 , __UpperCAmelCase=16 , __UpperCAmelCase=4_096 , __UpperCAmelCase="gelu" , __UpperCAmelCase=512 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE_ : str =d_model
SCREAMING_SNAKE_CASE_ : Union[str, Any] =decoder_layers
SCREAMING_SNAKE_CASE_ : Optional[Any] =decoder_attention_heads
SCREAMING_SNAKE_CASE_ : Tuple =decoder_ffn_dim
SCREAMING_SNAKE_CASE_ : List[str] =activation_function
SCREAMING_SNAKE_CASE_ : Optional[Any] =max_position_embeddings
SCREAMING_SNAKE_CASE_ : str =dropout
SCREAMING_SNAKE_CASE_ : Dict =attention_dropout
SCREAMING_SNAKE_CASE_ : Union[str, Any] =activation_dropout
SCREAMING_SNAKE_CASE_ : List[str] =init_std
SCREAMING_SNAKE_CASE_ : Optional[int] =decoder_layerdrop
SCREAMING_SNAKE_CASE_ : Dict =use_cache
SCREAMING_SNAKE_CASE_ : List[Any] =scale_embedding
SCREAMING_SNAKE_CASE_ : Tuple =use_learned_position_embeddings
SCREAMING_SNAKE_CASE_ : Union[str, Any] =layernorm_embedding
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
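# Illustrative sketch (assuming the class is exported as `TrOCRConfig` and, as in
# the upstream config, the arguments are stored on `self`): the `attribute_map`
# above lets generic code read BERT-style attribute names, e.g.
#   cfg = TrOCRConfig(d_model=1024, decoder_attention_heads=16)
#   cfg.hidden_size          # 1024, resolved through attribute_map -> d_model
#   cfg.num_attention_heads  # 16,   resolved -> decoder_attention_heads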
| 153
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( __A , __A , unittest.TestCase ):
'''simple docstring'''
_lowercase = StableDiffusionDiffEditPipeline
_lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
_lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
_lowercase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase = frozenset([] )
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[Any] =DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_zero=__UpperCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE_ : str =CLIPTextModel(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ : Tuple ={
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
SCREAMING_SNAKE_CASE_ : Any =floats_tensor((1, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
if str(__UpperCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Any =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] ={
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
SCREAMING_SNAKE_CASE_ : Dict =floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_ : List[Any] =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' )
if str(__UpperCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : List[Any] =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] ={
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
SCREAMING_SNAKE_CASE_ : str =floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_ : List[Any] =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' )
if str(__UpperCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : Dict =torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Tuple =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] ={
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
if not hasattr(self.pipeline_class , '_optional_components' ):
return
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =pipe(**__UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.pipeline_class.from_pretrained(__UpperCAmelCase )
pipe_loaded.to(__UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__UpperCAmelCase , __UpperCAmelCase ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =pipe_loaded(**__UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : str =np.abs(output - output_loaded ).max()
self.assertLess(__UpperCAmelCase , 1E-4 )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] ='cpu'
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_dummy_mask_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
SCREAMING_SNAKE_CASE_ : str =np.array([0] * 9 )
SCREAMING_SNAKE_CASE_ : Optional[Any] =np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : int ='cpu'
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] =self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =self.get_dummy_inversion_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe.invert(**__UpperCAmelCase ).images
SCREAMING_SNAKE_CASE_ : str =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE_ : Tuple =np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
SCREAMING_SNAKE_CASE_ : int =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCAmelCase , 1E-3 )
def __lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : int ='cpu'
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Dict ={'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE_ : str =DPMSolverMultistepScheduler(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =DPMSolverMultistepInverseScheduler(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.get_dummy_inversion_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =pipe.invert(**__UpperCAmelCase ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
SCREAMING_SNAKE_CASE_ : Optional[Any] =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __lowerCamelCase ( cls ):
SCREAMING_SNAKE_CASE_ : Tuple =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE_ : Any =raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE_ : Optional[int] =raw_image
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : Dict =DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[Any] =DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] ='a bowl of fruit'
SCREAMING_SNAKE_CASE_ : Optional[int] ='a bowl of pears'
SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask(
image=self.raw_image , source_prompt=__UpperCAmelCase , target_prompt=__UpperCAmelCase , generator=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[int] =pipe.invert(
prompt=__UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCAmelCase ).latents
SCREAMING_SNAKE_CASE_ : List[Any] =pipe(
prompt=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_latents=__UpperCAmelCase , generator=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[str] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : Any =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Any =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str ='a bowl of fruit'
SCREAMING_SNAKE_CASE_ : str ='a bowl of pears'
SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask(
image=self.raw_image , source_prompt=__UpperCAmelCase , target_prompt=__UpperCAmelCase , generator=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[Any] =pipe.invert(
prompt=__UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCAmelCase , num_inference_steps=25 , ).latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe(
prompt=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_latents=__UpperCAmelCase , generator=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE_ : List[str] =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 153
| 1
|
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 152
|
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
__a: Optional[Any] = True
from torch.cuda.amp import autocast
__a: Optional[Any] = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Whether to log verbose messages or not."} , )
SCREAMING_SNAKE_CASE = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
SCREAMING_SNAKE_CASE = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
SCREAMING_SNAKE_CASE = field(
default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} )
def configure_logger(model_args , training_args ):
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
SCREAMING_SNAKE_CASE = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
SCREAMING_SNAKE_CASE = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
SCREAMING_SNAKE_CASE = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
SCREAMING_SNAKE_CASE = field(
default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = "longest"
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def __call__( self , __lowerCAmelCase ) -> Dict[str, torch.Tensor]:
# reformat list to dict and set to pytorch format
lowercase__ : List[str] = self.feature_extractor.pad(
__lowerCAmelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
lowercase__ : List[str] = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
lowercase__ : Optional[Any] = batch['''input_values'''].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowercase__ : str = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
torch.long )
lowercase__ : int = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
# these two operations make sure that all values
# before the output-length indices are attended to
lowercase__ : Any = 1
lowercase__ : Dict = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
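# Worked mini-example of the flip/cumsum/flip trick (illustrative; in the upstream
# Wav2Vec2 code the preceding step writes a 1 at index `output_length - 1` of each
# zero row). For sequence length 5 and a real output length of 2:
#   [0, 1, 0, 0, 0] --flip--> [0, 0, 0, 1, 0] --cumsum--> [0, 0, 0, 1, 1]
#   --flip--> [1, 1, 0, 0, 0], i.e. exactly the first 2 positions stay attended.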
# sample randomly masked indices
lowercase__ : List[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__lowerCAmelCase , min_masks=2 , )
return batch
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=1.0 , **__lowerCAmelCase ) -> int:
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
lowercase__ : Union[str, Any] = 0
lowercase__ : List[str] = max_gumbel_temp
lowercase__ : Union[str, Any] = min_gumbel_temp
lowercase__ : List[Any] = gumbel_temp_decay
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> torch.Tensor:
model.train()
lowercase__ : List[Any] = self._prepare_inputs(__lowerCAmelCase )
if self.use_amp:
with autocast():
lowercase__ : List[Any] = self.compute_loss(__lowerCAmelCase , __lowerCAmelCase )
else:
lowercase__ : Optional[Any] = self.compute_loss(__lowerCAmelCase , __lowerCAmelCase )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowercase__ : str = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowercase__ : Union[str, Any] = loss.sum() / (inputs['''mask_time_indices''']).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
lowercase__ : str = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__lowerCAmelCase ).backward()
elif self.use_apex:
with amp.scale_loss(__lowerCAmelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__lowerCAmelCase )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
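# Worked example of the gumbel temperature schedule above (using the dataclass
# defaults earlier in this file: max 2.0, min 0.5, decay 0.999995):
#   step 0:       max(2.0 * 0.999995**0,       0.5) = 2.0
#   step 100_000: max(2.0 * 0.999995**100_000, 0.5) ~ 1.21
# i.e. the temperature decays exponentially and is floored at min_gumbel_temp.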
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase__ , lowercase__ , lowercase__ : Dict = parser.parse_args_into_dataclasses()
configure_logger(UpperCAmelCase , UpperCAmelCase )
# Downloading and loading a dataset from the hub.
lowercase__ : Union[str, Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
lowercase__ : Dict = DatasetDict()
lowercase__ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
lowercase__ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
lowercase__ : Any = DatasetDict()
lowercase__ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
lowercase__ : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowercase__ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=UpperCAmelCase )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , batch["sampling_rate"] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
# load audio files into numpy arrays
lowercase__ : int = datasets.map(
UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
# filter audio files that are too long
lowercase__ : List[Any] = vectorized_datasets.filter(
lambda UpperCAmelCase : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
lowercase__ : Tuple = vectorized_datasets.map(
UpperCAmelCase , batched=UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowercase__ : Union[str, Any] = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm=\'layer\'''' )
lowercase__ : int = WavaVecaForPreTraining(UpperCAmelCase )
lowercase__ : List[Any] = DataCollatorForWavaVecaPretraining(model=UpperCAmelCase , feature_extractor=UpperCAmelCase )
lowercase__ : Any = WavaVecaPreTrainer(
model=UpperCAmelCase , data_collator=UpperCAmelCase , args=UpperCAmelCase , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=UpperCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 152
| 1
|
from itertools import permutations
def is_substring_divisible(num ):
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n = 10 ):
    return sum(
        int(''.join(map(str , num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 703
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext , key ):
    decoded = ""
    for keychar, cipherchar in zip(cycle(key) , ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext ):
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3):
        encoded = try_key(ciphertext , key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles , common_word ):
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename = "p059_cipher.txt" ):
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')
    ciphertext = [int(number) for number in data.strip().split(',')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 444
| 0
|
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE: int = LxmertTokenizer
SCREAMING_SNAKE_CASE: Any = LxmertTokenizerFast
SCREAMING_SNAKE_CASE: Union[str, Any] = True
SCREAMING_SNAKE_CASE: Dict = True
def _a ( self ):
super().setUp()
lowerCAmelCase_: int = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 613
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
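# Lazy import structure: submodules are only imported on first attribute
# access, and each optional backend (vision / torch / TF) is skipped when its
# dependency is missing, so importing the package stays cheap.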
_import_structure = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 613
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
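# XLM-RoBERTa reuses the RoBERTa architecture, so its configuration mirrors
# RobertaConfig; only the checkpoint map and the model_type differ.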
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 146
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( __lowercase ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : int = MgpstrTokenizer
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Tuple = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
def snake_case_ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
__SCREAMING_SNAKE_CASE: Any = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__SCREAMING_SNAKE_CASE: Tuple = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__SCREAMING_SNAKE_CASE: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + '''\n''' )
def snake_case_ ( self , **_lowerCAmelCase ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = '''tester'''
__SCREAMING_SNAKE_CASE: Tuple = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__SCREAMING_SNAKE_CASE: Any = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__SCREAMING_SNAKE_CASE: int = tokenizer.encode([special_token] , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
__SCREAMING_SNAKE_CASE: Tuple = tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[int] = self.get_input_output_texts(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Tuple = tokenizer.tokenize(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertNotEqual(len(_lowerCAmelCase ) , 0 )
__SCREAMING_SNAKE_CASE: List[str] = tokenizer.decode(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _lowerCAmelCase )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
| 146
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
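# Tests for BiT (Big Transfer), a ResNet-v2 variant built on group
# normalization and weight standardization; the tester below uses a tiny
# randomly initialized config so the forward passes stay fast on CPU.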
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups)
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass
    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized")
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized")
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4])
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
| 466
|
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
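# The Burrows-Wheeler transform sorts all rotations of a string and keeps the
# last column.  It is reversible given the index of the original rotation, and
# it tends to group identical characters together, which helps compressors.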
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return all cyclic rotations of the input string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """Return the Burrows-Wheeler transform of s and the index of the original rotation."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Rebuild the original string from its Burrows-Wheeler transform."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string).")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__lowerCAmelCase = 'Provide a string that I will generate its BWT transform: '
__lowerCAmelCase = input(entry_msg).strip()
__lowerCAmelCase = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result['bwt_string']}'"""
)
__lowerCAmelCase = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
F"""we get original string '{original_string}'"""
)
| 466
| 1
|
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
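# The checks below flag configuration attributes that are never read in any
# modeling_*.py file next to the configuration class, unless they are
# allow-listed above.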
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the modeling source strings."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the `__init__` arguments of `config_class` that are not used in any modeling file."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    """Check the `__init__` arguments of all configuration classes in the library."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 706
|
'''simple docstring'''
from __future__ import annotations
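# Slowsort is the deliberately inefficient "multiply and surrender" sort:
# recursively sort both halves, move the maximum to the end, then sort
# everything but the last element again.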
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end + 1]` in place."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 47
| 0
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
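# Integration tests for Versatile Diffusion, a single pipeline that exposes
# text-to-image, image-variation, and dual text+image guided generation.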
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
            generator = generator.manual_seed(0)
            new_image = pipe.dual_guided(
                prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images
            assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 467
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
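# Quine-McCluskey minimization: merge minterms that differ in a single bit into
# prime implicants, build the implicant/minterm coverage chart, then select
# essential prime implicants (columns covered by exactly one row) first.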
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings if they differ in at most one position."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants; unmarked rows are prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether string1 covers string2, given the number of '_' wildcards."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants first, then greedily cover the rest."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one row marks that row's implicant as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 467
| 1
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
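# Dummy objects let `import diffusers` succeed without optional backends
# installed: each dummy raises a helpful error (via requires_backends) only
# when it is actually instantiated or called.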
__lowerCamelCase : List[Any] = "src/diffusers"
# Matches is_xxx_available()
__lowerCamelCase : List[Any] = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
__lowerCamelCase : List[Any] = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
__lowerCamelCase : List[str] = "\n{0} = None\n"
__lowerCamelCase : Dict = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
__lowerCamelCase : Union[str, Any] = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def lowerCamelCase_(lowerCamelCase_ ) -> Optional[int]:
UpperCAmelCase = _re_backend.findall(lowerCamelCase_ )
if len(lowerCamelCase_ ) == 0:
return None
return "_and_".join(lowerCamelCase_ )
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for a dummy object: a constant, a function, or a class."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f'dummy_{short_names.get(backend, backend)}_objects.py')
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main '
                    "__init__ has new objects.")
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f'diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` '
                    "to fix this.")
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__lowerCamelCase : Dict = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 716
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
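# BetterTransformer swaps supported attention / feed-forward submodules for
# torch-native fastpath kernels; saving is only allowed after reversing the
# transformation so checkpoints remain architecture-faithful.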
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # saving a transformed model should fail (ValueError assumed here)
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 457
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
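# KwargsHandler dataclasses (GradScalerKwargs, DistributedDataParallelKwargs, ...)
# forward only the fields whose values differ from their defaults to the
# underlying torch object, which is exactly what these tests verify.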
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
snake_case = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
snake_case = Accelerator(kwargs_handlers=[ddp_scaler])
snake_case = torch.nn.Linear(1_00, 2_00)
snake_case = accelerator.prepare(model)
# Check the values changed in kwargs
snake_case = ''''''
snake_case = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 378
|
'''simple docstring'''
import math
import sys
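# LZW decompression over a bit string: the lexicon starts as {"0": "0", "1": "1"}
# and every key gains a leading-zero prefix whenever its size crosses a power
# of two, mirroring the growing code length used by the compressor.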
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # code length grows: prefix every existing key with a leading zero
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Group the bit string into bytes, pad the tail, and write it to file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix that was prepended during compression."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    # drop the run of leading zeros, then the terminating "1" of the prefix
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read the compressed file, decompress its contents, and write the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 236
| 0
|
from __future__ import annotations
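# Minimum cost path through a grid with right/down moves only, computed in
# place: each cell accumulates the cheaper of the costs arriving from above
# or from the left.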
def min_path_sum(matrix: list[list[int]]) -> int:
    """Return the minimum cost of a top-left to bottom-right path."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
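# Legacy TF text-classification example: CSV splits are tokenized into
# tf.data pipelines (one generator per split) and fed to TFTrainer.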
def get_tfds(train_file, eval_file, test_file, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None):
    """Load a CSV dataset and turn each split into a tf.data.Dataset (dtypes assumed int64 here)."""
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"), batched=True)
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding="max_length"), batched=True)
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int64 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])))
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int64 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])))
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int64 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])))
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
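# Hedged illustration (added): compute_metrics above reduces logits to a single
# accuracy scalar; the same arithmetic on toy arrays.
def _example_accuracy() -> None:
    logits = np.array([[0.1, 0.9], [0.8, 0.2]])
    labels = np.array([1, 1])
    preds = np.argmax(logits, axis=1)
    assert (preds == labels).mean() == 0.5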
| 294
| 0
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
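# Hedged usage sketch (added): FileLock serializes access across processes;
# the path below is a placeholder.
def _example_filelock_usage(tmp_path) -> None:
    lock = FileLock(str(tmp_path / "shared.lock"))
    with lock.acquire(timeout=1):
        pass  # critical section: only one holder at a time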
| 190
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of integers and is initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # Constraints that are complete subsets of another are rejected.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
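# Hedged illustration (added): the constraint advances through one branch of
# its trie per update() call; a compact trace of the happy path.
def _example_disjunctive_trace() -> None:
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        dc.update(token)
    assert dc.completed and dc.current_seq == [1, 2, 4]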
| 604
| 0
|
"""simple docstring"""
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Reads the message column by column: character i goes to column i % key."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Rebuilds the plaintext by filling the transposition grid row by row."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
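# Hedged worked example (added): encrypting "Hello World" with key 4 reads
# every 4th character per column: col 0 -> "Hor", col 1 -> "e l",
# col 2 -> "lWd", col 3 -> "lo".
def _example_transposition() -> None:
    cipher = encrypt_message(4, "Hello World")
    assert cipher == "Hore llWdlo"
    assert decrypt_message(4, cipher) == "Hello World"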
| 361
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
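# Hedged usage sketch (added): the defaults above describe a small model;
# derived properties mirror the T5 naming.
def _example_umt5_config() -> None:
    config = UMT5Config()
    assert config.hidden_size == config.d_model == 512
    assert config.num_attention_heads == 6
    assert config.dense_act_fn == "gelu_new"  # "gated-gelu" is remapped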
| 361
| 1
|
'''Static code-quality checks for the bundled dataset scripts.'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Finds a non-binary open(...) call that lacks an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Finds a print statement that is not commented out or inside a docstring."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
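# Hedged illustration (added): what the encoding regex catches vs. allows.
def _example_encoding_regex() -> None:
    pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    assert pattern.search(" open('data.txt')") is not None  # flagged: no encoding
    assert pattern.search(" open('data.txt', encoding='utf-8')") is None  # allowed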
| 69
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training,
    # not for downstream inference
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
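# Hedged illustration (added): read_in_q_k_v splits a fused qkv projection of
# shape (3 * hidden, hidden) into three (hidden, hidden) blocks, in q/k/v order.
def _example_qkv_split() -> None:
    hidden = 4
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden, :], qkv[hidden : hidden * 2, :], qkv[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)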
| 647
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151
|
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Returns the numerator of the fraction immediately to the left of
    numerator/denominator among fractions with denominator up to limit."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
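# Hedged worked example (added): with limit=8 the fraction immediately left of
# 3/7 is 2/5, so solution(3, 7, 8) returns its numerator, 2.
def _example_solution_small_limit() -> None:
    assert solution(numerator=3, denominator=7, limit=8) == 2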
| 151
| 1
|
'''Fast tests for the DeepFloyd IF inpainting pipeline.'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 18
|
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    """Implements tanh via the logistic identity: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
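# Hedged check (added): the identity above agrees with numpy's built-in tanh.
def _example_tanh_matches_numpy() -> None:
    x = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(tangent_hyperbolic(x), np.tanh(x))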
| 27
| 0
|
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
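# Hedged usage sketch (added): sort() dispatches between quicksort, heap sort
# and insertion sort depending on recursion depth and slice size.
def _example_intro_sort() -> None:
    data = [5.0, 2.0, 9.0, 1.0, 7.0]
    assert sort(data) == [1.0, 2.0, 5.0, 7.0, 9.0]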
| 269
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 7_68,
'''430M''': 10_24,
'''1B5''': 20_48,
'''3B''': 25_60,
'''7B''': 40_96,
'''14B''': 51_20,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
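# Hedged illustration (added): the block-index regexes above keep the layer
# number while renaming the submodule.
def _example_rwkv_rename() -> None:
    renamed = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", "blocks.3.att.key.weight")
    assert renamed == "blocks.3.attention.key.weight"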
| 269
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
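# Hedged usage sketch (added): PipelineTool instances are callable, chaining
# encode -> forward -> decode; this downloads models on first use.
def _example_tts_tool() -> None:
    tool = TextToSpeechTool()
    waveform = tool("Hello world")  # 1-D float tensor with the synthesized audio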
| 152
|
'''simple docstring'''
def count_inversions_bf(arr):
    """Counts inversions by checking every pair, in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Counts inversions with merge-sort style divide and conquer, in O(n log n).
    Returns the sorted array together with the inversion count."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
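# Hedged worked example (added): [3, 1, 2] has two inversions, (3, 1) and (3, 2).
def _example_count_inversions() -> None:
    assert count_inversions_bf([3, 1, 2]) == 2
    assert count_inversions_recursive([3, 1, 2]) == ([1, 2, 3], 2)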
| 152
| 1
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Creates a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """Subclass of `argparse.ArgumentParser` that uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args, allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
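# Hedged usage sketch (added): a dataclass becomes a CLI automatically.
# DemoArgs and its fields are illustrative only.
def _example_hf_argument_parser() -> None:
    @dataclasses.dataclass
    class DemoArgs:
        lr: float = HfArg(default=1e-3, help="learning rate")
        debug: bool = HfArg(default=False, help="verbose mode")

    parser = HfArgumentParser(DemoArgs)
    (demo_args,) = parser.parse_args_into_dataclasses(args=["--lr", "0.01", "--debug", "true"])
    assert demo_args.lr == 0.01 and demo_args.debug is True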
| 709
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Any = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Tuple = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Dict = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
lowerCAmelCase__ : Union[str, Any] = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
lowerCAmelCase__ : Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
lowerCAmelCase__ : Tuple = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ : Any = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ : List[str] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRContextEncoderTokenizer
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = DPRQuestionEncoderTokenizer
lowerCAmelCase__ : Tuple = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCAmelCase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCAmelCase__ : int = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
        titles (`str` or `List[str]`):
            The passage titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passage texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(_lowerCamelCase )
class __snake_case :
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
snake_case__ : Optional[Any] = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : int = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
snake_case__ : Optional[int] = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
snake_case__ : List[Any] = len(__UpperCamelCase )
snake_case__ : str = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
        assert len(__UpperCamelCase ) == len(
            __UpperCamelCase ), F"""There should be as many titles as texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts."""
snake_case__ : Optional[int] = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )['input_ids']
snake_case__ : Union[str, Any] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
snake_case__ : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case__ : Union[str, Any] = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = 64 , __UpperCamelCase = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
snake_case__ : Optional[Any] = reader_input['input_ids']
snake_case__ , snake_case__ , snake_case__ : Any = reader_output[:3]
snake_case__ : List[str] = len(__UpperCamelCase )
snake_case__ : Tuple = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
snake_case__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
snake_case__ : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
snake_case__ : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
snake_case__ : Union[str, Any] = sequence_ids.index(self.pad_token_id )
else:
snake_case__ : str = len(__UpperCamelCase )
snake_case__ : Dict = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
snake_case__ : Any = []
for start_index, start_score in enumerate(__UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        snake_case__ : str = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : __UpperCamelCase[1] , reverse=__UpperCamelCase )
snake_case__ : Any = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
snake_case__ : str = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_lowerCamelCase )
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = ["""input_ids""", """attention_mask"""]
__lowerCamelCase = DPRReaderTokenizer
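# Illustrative usage sketch (not part of the original file; assumes the standard
# `from_pretrained` API and the upstream class name DPRReaderTokenizerFast):
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions="Who wrote DPR?", titles=["DPR"], texts=["Dense Passage Retrieval ..."])
#   # encoded["input_ids"] has shape (n_passages, sequence_length), laid out as
#   # [CLS] <question ids> [SEP] <title ids> [SEP] <text ids>, matching the docstring above.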
| 699
| 0
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __lowercase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int = 1.0 , __lowerCamelCase : Any = None , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCAmelCase = initial_learning_rate
UpperCAmelCase = warmup_steps
UpperCAmelCase = power
UpperCAmelCase = decay_schedule_fn
UpperCAmelCase = name
def __call__( self : str , __lowerCamelCase : Any ) -> str:
"""simple docstring"""
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
UpperCAmelCase = tf.cast(_snake_case , tf.floataa )
UpperCAmelCase = tf.cast(self.warmup_steps , tf.floataa )
UpperCAmelCase = global_step_float / warmup_steps_float
UpperCAmelCase = self.initial_learning_rate * tf.math.pow(_snake_case , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_snake_case , )
def _lowercase ( self : List[str] ) -> Tuple:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 0.9 , lowerCAmelCase_ = 0.999 , lowerCAmelCase_ = 1e-8 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = None , ) ->Optional[int]:
UpperCAmelCase = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_SCREAMING_SNAKE_CASE , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_SCREAMING_SNAKE_CASE , )
if num_warmup_steps:
UpperCAmelCase = WarmUp(
initial_learning_rate=_SCREAMING_SNAKE_CASE , decay_schedule_fn=_SCREAMING_SNAKE_CASE , warmup_steps=_SCREAMING_SNAKE_CASE , )
if weight_decay_rate > 0.0:
UpperCAmelCase = AdamWeightDecay(
learning_rate=_SCREAMING_SNAKE_CASE , weight_decay_rate=_SCREAMING_SNAKE_CASE , beta_a=_SCREAMING_SNAKE_CASE , beta_a=_SCREAMING_SNAKE_CASE , epsilon=_SCREAMING_SNAKE_CASE , clipnorm=_SCREAMING_SNAKE_CASE , global_clipnorm=_SCREAMING_SNAKE_CASE , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=_SCREAMING_SNAKE_CASE , )
else:
UpperCAmelCase = tf.keras.optimizers.Adam(
learning_rate=_SCREAMING_SNAKE_CASE , beta_a=_SCREAMING_SNAKE_CASE , beta_a=_SCREAMING_SNAKE_CASE , epsilon=_SCREAMING_SNAKE_CASE , clipnorm=_SCREAMING_SNAKE_CASE , global_clipnorm=_SCREAMING_SNAKE_CASE , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
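# Illustrative usage sketch of the factory above (not part of the original file;
# its upstream name `create_optimizer` and the keyword names are assumptions):
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
#   )
#   # The LR ramps linearly from 0 to 5e-5 over the first 500 steps, then follows
#   # the polynomial decay schedule down to init_lr * min_lr_ratio.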
class __lowercase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[str] , __lowerCamelCase : Optional[int] = 0.001 , __lowerCamelCase : Any = 0.9 , __lowerCamelCase : int = 0.999 , __lowerCamelCase : List[str] = 1e-7 , __lowerCamelCase : Union[str, Any] = False , __lowerCamelCase : Union[str, Any] = 0.0 , __lowerCamelCase : str = None , __lowerCamelCase : Any = None , __lowerCamelCase : str = "AdamWeightDecay" , **__lowerCamelCase : Tuple , ) -> Any:
"""simple docstring"""
super().__init__(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case )
UpperCAmelCase = weight_decay_rate
UpperCAmelCase = include_in_weight_decay
UpperCAmelCase = exclude_from_weight_decay
@classmethod
def _lowercase ( cls : List[str] , __lowerCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase = {"WarmUp": WarmUp}
return super(_snake_case , cls ).from_config(_snake_case , custom_objects=_snake_case )
def _lowercase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
super(_snake_case , self )._prepare_local(_snake_case , _snake_case , _snake_case )
UpperCAmelCase = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def _lowercase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
def _lowercase ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=None , **__lowerCamelCase : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase = list(zip(*_snake_case ) )
return super(_snake_case , self ).apply_gradients(zip(_snake_case , _snake_case ) , name=_snake_case , **_snake_case )
def _lowercase ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
UpperCAmelCase = apply_state or {}
UpperCAmelCase = apply_state.get((var_device, var_dtype) )
if coefficients is None:
UpperCAmelCase = self._fallback_apply_state(_snake_case , _snake_case )
UpperCAmelCase = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _lowercase ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any]=None ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self._get_lr(var.device , var.dtype.base_dtype , _snake_case )
UpperCAmelCase = self._decay_weights_op(_snake_case , _snake_case , _snake_case )
with tf.control_dependencies([decay] ):
return super(_snake_case , self )._resource_apply_dense(_snake_case , _snake_case , **_snake_case )
def _lowercase ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=None ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self._get_lr(var.device , var.dtype.base_dtype , _snake_case )
UpperCAmelCase = self._decay_weights_op(_snake_case , _snake_case , _snake_case )
with tf.control_dependencies([decay] ):
return super(_snake_case , self )._resource_apply_sparse(_snake_case , _snake_case , _snake_case , **_snake_case )
def _lowercase ( self : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def _lowercase ( self : str , __lowerCamelCase : Any ) -> Dict:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(_snake_case , _snake_case ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(_snake_case , _snake_case ) is not None:
return False
return True
class __lowercase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = None
@property
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
if self._accum_steps is None:
UpperCAmelCase = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=_snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : List[Any] , __lowerCamelCase : Tuple ) -> List[Any]:
"""simple docstring"""
if not self._gradients:
UpperCAmelCase = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(_snake_case ) , trainable=_snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(_snake_case ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(_snake_case )}""" )
for accum_gradient, gradient in zip(self._gradients , _snake_case ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(_snake_case )
self._accum_steps.assign_add(1 )
def _lowercase ( self : int ) -> Optional[int]:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(_snake_case ) )
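# Illustrative usage sketch of the accumulator class above (not part of the
# original file; the upstream names GradientAccumulator / .gradients / .reset()
# are assumptions, since the identifiers in this dump are mangled):
#   accumulator = GradientAccumulator()
#   for grads in per_microbatch_gradients:
#       accumulator(grads)                      # running sum; bumps the step counter
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()                         # zero the buffers for the next macro-batch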
| 377
|
'''simple docstring'''
import json
import sys
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ) -> Tuple:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
UpperCAmelCase_ : Dict = json.load(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[Any] = results[benchmark_name]
UpperCAmelCase_ : Any = benchmark_name.split("/" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
UpperCAmelCase_ : Any = "| metric |"
UpperCAmelCase_ : Any = "|--------|"
UpperCAmelCase_ : Union[str, Any] = "| new / old (diff) |"
for metric_name in sorted(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = benchmark_res[metric_name]
UpperCAmelCase_ : Union[str, Any] = metric_vals["new"]
UpperCAmelCase_ : Optional[Any] = metric_vals.get("old" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = metric_vals.get("diff" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = F''' {new_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>" )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.writelines("\n".join(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
_lowerCamelCase = sys.argv[1]
_lowerCamelCase = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
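# Expected input shape (illustrative, inferred from the traversal above): the JSON
# file maps benchmark names to {metric: {"new": ..., "old": ..., "diff": ...}} dicts, e.g.
#   {"reports/bench_a": {"latency_ms": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
# and each benchmark is rendered as a small markdown table inside a <details> block.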
| 71
| 0
|
"""simple docstring"""
def prefix_function ( input_string : str ):
    '''simple docstring'''
    prefix_result : List[str] = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j : Optional[Any] = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def lowerCAmelCase_ ( lowercase_ : str ):
'''simple docstring'''
return max(prefix_function(lowercase_ ) )
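# Worked example (illustrative): each prefix-function entry is the length of the
# longest proper border (a prefix that is also a suffix) of that prefix of the string.
assert prefix_function("aabaaab" ) == [0, 1, 0, 1, 2, 2, 3]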
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718
|
"""simple docstring"""
from math import isqrt
def is_prime ( number : int ):
    '''simple docstring'''
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution ( max_prime : int = 10**6 ):
    '''simple docstring'''
    primes_count : List[Any] = 0
    cube_index : Optional[int] = 1
    prime_candidate : int = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
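# Note (illustrative): the candidates generated above are the gaps between
# consecutive cubes, (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1 = 7, 19, 37, 61, ...;
# `prime_candidate += 6 * cube_index` reproduces them because successive gaps
# differ by 6 * (n + 1), which is `6 * cube_index` after the increment:
for _n in range(1 , 6 ):
    assert (_n + 1) ** 3 - _n**3 == 3 * _n**2 + 3 * _n + 1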
if __name__ == "__main__":
print(f'{solution() = }')
| 401
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A = logging.get_logger(__name__)
A = {"""vocab_file""": """spiece.model"""}
A = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
A = {"""bert_for_seq_generation""": 512}
class a__ ( __magic_name__ ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = []
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ):
"""simple docstring"""
__UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Dict = vocab_file
__UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCamelCase_)
@property
def a_ ( self : List[str]):
"""simple docstring"""
return self.sp_model.get_piece_size()
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.__dict__.copy()
__UpperCAmelCase : List[Any] = None
return state
def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def a_ ( self : Any , UpperCamelCase_ : str):
"""simple docstring"""
        return self.sp_model.encode(UpperCamelCase_ , out_type=str)
def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
return self.sp_model.piece_to_id(UpperCamelCase_)
def a_ ( self : Tuple , UpperCamelCase_ : int):
"""simple docstring"""
__UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_)
return token
def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : int = []
__UpperCAmelCase : Tuple = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase_) + token
__UpperCAmelCase : List[Any] = []
else:
current_sub_tokens.append(UpperCamelCase_)
out_string += self.sp_model.decode(UpperCamelCase_)
return out_string.strip()
def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__UpperCAmelCase : Tuple = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCamelCase_ , "wb") as fi:
__UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_)
return (out_vocab_file,)
| 77
|
"""simple docstring"""
def lowerCAmelCase_ (column_title :str ) -> int:
    assert column_title.isupper()
    answer : int = 0
    index : Tuple = len(column_title ) - 1
    power : Union[str, Any] = 0
    while index >= 0:
        value : List[Any] = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
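# Quick sanity check (illustrative): each letter contributes
# (ord(letter) - 64) * 26**power, with power counted from the rightmost letter.
assert lowerCAmelCase_("A" ) == 1
assert lowerCAmelCase_("AB" ) == 28  # 1 * 26 + 2
assert lowerCAmelCase_("ZY" ) == 701  # 26 * 26 + 25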
if __name__ == "__main__":
from doctest import testmod
testmod()
| 473
| 0
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase : Union[str, Any] = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __snake_case( unittest.TestCase , __A ):
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_tool('''text-question-answering''' )
self.tool.setup()
_SCREAMING_SNAKE_CASE = load_tool('''text-question-answering''' , remote=A_ )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tool(A_ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(A_ , '''launched the BigScience Research Workshop''' )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.remote_tool(A_ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(A_ , '''launched the BigScience Research Workshop''' )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tool(text=A_ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(A_ , '''launched the BigScience Research Workshop''' )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.remote_tool(text=A_ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(A_ , '''launched the BigScience Research Workshop''' )
| 168
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Tuple = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ["""DeiTFeatureExtractor"""]
lowerCamelCase : int = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 168
| 1
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase_ = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=False ) -> Any:
lowercase__ , lowercase__ : Any = create_model(
'''HTSAT-tiny''' , '''roberta''' , __lowerCamelCase , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=__lowerCamelCase , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Dict = {}
lowercase__ : str = r'''.*sequential.(\d+).*'''
lowercase__ : Union[str, Any] = r'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowercase__ : Optional[Any] = key.replace(__lowerCamelCase , __lowerCamelCase )
if re.match(__lowerCamelCase , __lowerCamelCase ):
# replace sequential layers with list
lowercase__ : List[str] = re.match(__lowerCamelCase , __lowerCamelCase ).group(1 )
            lowercase__ : str = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(sequential_layer )//3}.linear.""" )
elif re.match(__lowerCamelCase , __lowerCamelCase ):
lowercase__ : int = int(re.match(__lowerCamelCase , __lowerCamelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
lowercase__ : List[Any] = 1 if projecton_layer == 0 else 2
lowercase__ : Optional[int] = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
lowercase__ : Any = value
lowercase__ : List[str] = mixed_qkv.size(0 ) // 3
lowercase__ : Tuple = mixed_qkv[:qkv_dim]
lowercase__ : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2]
lowercase__ : Any = mixed_qkv[qkv_dim * 2 :]
lowercase__ : List[str] = query_layer
lowercase__ : Dict = key_layer
lowercase__ : Tuple = value_layer
else:
lowercase__ : Optional[int] = value
return model_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Optional[int]:
lowercase__ , lowercase__ : Tuple = init_clap(__lowerCamelCase , enable_fusion=__lowerCamelCase )
clap_model.eval()
lowercase__ : Any = clap_model.state_dict()
lowercase__ : Any = rename_state_dict(__lowerCamelCase )
lowercase__ : List[str] = ClapConfig()
lowercase__ : Optional[int] = enable_fusion
lowercase__ : str = ClapModel(__lowerCamelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
transformers_config.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowerCAmelCase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 560
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow's C++ logging before any TF import
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 560
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_a = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = 42
class __A ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCAmelCase = 1_6 , __lowerCAmelCase = 8_8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 3_2 , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = "geglu" , __lowerCAmelCase = True , __lowerCAmelCase = True , ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = attention_head_dim
lowerCamelCase__ = num_attention_heads * attention_head_dim
lowerCamelCase__ = in_channels
lowerCamelCase__ = torch.nn.GroupNorm(num_groups=__lowerCAmelCase , num_channels=__lowerCAmelCase , eps=1E-6 , affine=__lowerCAmelCase )
lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
# 3. Define transformers blocks
lowerCamelCase__ = nn.ModuleList(
[
BasicTransformerBlock(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dropout=__lowerCAmelCase , cross_attention_dim=__lowerCAmelCase , activation_fn=__lowerCAmelCase , attention_bias=__lowerCAmelCase , double_self_attention=__lowerCAmelCase , norm_elementwise_affine=__lowerCAmelCase , )
for d in range(__lowerCAmelCase )
] )
lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=1 , __lowerCAmelCase=None , __lowerCAmelCase = True , ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = hidden_states.shape
lowerCamelCase__ = batch_frames // num_frames
lowerCamelCase__ = hidden_states
lowerCamelCase__ = hidden_states[None, :].reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowerCamelCase__ = self.norm(__lowerCAmelCase )
lowerCamelCase__ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = self.proj_in(__lowerCAmelCase )
# 2. Blocks
for block in self.transformer_blocks:
lowerCamelCase__ = block(
__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , timestep=__lowerCAmelCase , cross_attention_kwargs=__lowerCAmelCase , class_labels=__lowerCAmelCase , )
# 3. Output
lowerCamelCase__ = self.proj_out(__lowerCAmelCase )
lowerCamelCase__ = (
hidden_states[None, None, :]
.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowerCamelCase__ = hidden_states.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=__lowerCAmelCase )
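# Shape walk-through of the forward pass above (illustrative commentary, not part
# of the original file): hidden states arrive as (batch * num_frames, channels,
# height, width), are regrouped to (batch * height * width, num_frames, channels)
# so the transformer blocks attend across the time axis, then folded back to the
# input layout and added to the residual connection.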
| 29
| 1
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def __UpperCAmelCase ( lowerCamelCase_ : dict ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def __UpperCAmelCase ( lowerCamelCase_ : np.ndarray , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : np.ndarray ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCamelCase_ , lowerCamelCase_ )
# Predict target for test data
SCREAMING_SNAKE_CASE_ : Dict = xgb.predict(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = predictions.reshape(len(lowerCamelCase_ ) , 1 )
return predictions
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = fetch_california_housing()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = data_handling(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = train_test_split(
lowerCamelCase_ , lowerCamelCase_ , test_size=0.2_5 , random_state=1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = xgboost(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Error printing
print(F'Mean Absolute Error : {mean_absolute_error(lowerCamelCase_ , lowerCamelCase_ )}' )
print(F'Mean Square Error : {mean_squared_error(lowerCamelCase_ , lowerCamelCase_ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 105
|
from math import pow, sqrt
def _A( *UpperCamelCase__ : float ) -> bool:
'''simple docstring'''
__lowercase = len(UpperCamelCase__ ) > 0 and all(value > 0.0 for value in values )
return result
def _A( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ )
        else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
)
def _A( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def _A( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def _A( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def _A( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
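# Worked example (illustrative): Graham's law states rate_1 / rate_2 = sqrt(M_2 / M_1)
# (the mangled duplicate parameter names above obscure which mass is which), so
# hydrogen (M ~ 2.0) effuses about four times faster than oxygen (M = 32.0):
assert round(sqrt(32.0 / 2.0 ) , 6 ) == 4.0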
| 332
| 0
|
'''simple docstring'''
def __get_demo_graph ( index ) -> Dict:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges ( graph ) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )
    bridges = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
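# Worked check (illustrative): in the first demo graph above, only the edges that
# separate vertex 4 and the 0-1-2 triangle from the 5-6-7-8 cycle are bridges;
# every edge that lies on a cycle is correctly skipped.
assert sorted(compute_bridges(__get_demo_graph(0 ) ) ) == [(2, 3), (2, 5), (3, 4)]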
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706
|
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __A : Union[str, Any] ):
"""simple docstring"""
_lowercase = parent
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
return {}
def A__ ( ) -> str:
_lowercase = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
_lowercase = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
return [html_string_a, html_string_a]
@require_bsa
class UpperCamelCase__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = MarkupLMFeatureExtractor if is_bsa_available() else None
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase = MarkupLMFeatureExtractionTester(self )
@property
def snake_case ( self : List[str] ):
"""simple docstring"""
return self.feature_extract_tester.prepare_feat_extract_dict()
def snake_case ( self : Dict ):
"""simple docstring"""
# Initialize feature_extractor
_lowercase = self.feature_extraction_class()
# Test not batched input
_lowercase = get_html_strings()[0]
_lowercase = feature_extractor(__A )
# fmt: off
_lowercase = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
_lowercase = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
self.assertEqual(encoding.nodes , __A )
self.assertEqual(encoding.xpaths , __A )
# Test batched
_lowercase = get_html_strings()
_lowercase = feature_extractor(__A )
# fmt: off
_lowercase = expected_nodes + [["My First Heading", "My first paragraph."]]
_lowercase = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , __A )
self.assertEqual(encoding.xpaths , __A )
| 602
| 0
|
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_lowerCAmelCase : Optional[int] = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
_lowerCAmelCase : Dict = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
_lowerCAmelCase : List[Any] = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
_lowerCAmelCase : Dict = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
_lowerCAmelCase : List[str] = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key( k , patterns ) -> Optional[Any]:
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
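# Illustrative trace (comment only -- the pattern tables above keep their mangled
# names, so this is not executable as-is): rename_state_dict_key applied to
# "pegasus/decoder/layer_0/attention/self/query/kernel" with the decoder patterns
# walks "/" -> ".", "layer_" -> "layers.", "kernel" -> "weight", "pegasus" -> "model",
# "attention.self" -> "self_attn", "query" -> "q_proj" and yields
# "model.decoder.layers.0.self_attn.q_proj.weight".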
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> BigBirdPegasusForConditionalGeneration:
'''simple docstring'''
_lowerCamelCase : List[str] = BigBirdPegasusConfig(**_lowerCAmelCase )
_lowerCamelCase : Optional[int] = BigBirdPegasusForConditionalGeneration(_lowerCAmelCase )
_lowerCamelCase : int = torch_model.state_dict()
_lowerCamelCase : Any = {}
# separating decoder weights
_lowerCamelCase : Any = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
_lowerCamelCase : Any = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
        _lowerCamelCase : Any = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowerCAmelCase ):
continue
_lowerCamelCase : Tuple = DECODER_PATTERNS
_lowerCamelCase : List[str] = rename_state_dict_key(_lowerCAmelCase , _lowerCAmelCase )
if new_k not in state_dict:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
_lowerCamelCase : int = v.T
_lowerCamelCase : int = torch.from_numpy(_lowerCAmelCase )
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
        _lowerCamelCase : List[Any] = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowerCAmelCase ):
continue
_lowerCamelCase : Optional[Any] = REMAINING_PATTERNS
_lowerCamelCase : int = rename_state_dict_key(_lowerCAmelCase , _lowerCAmelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
_lowerCamelCase : str = v.T
_lowerCamelCase : List[str] = torch.from_numpy(_lowerCAmelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
_lowerCamelCase : List[Any] = mapping["model.embed_positions.weight"]
_lowerCamelCase : Union[str, Any] = mapping.pop("model.embed_positions.weight" )
_lowerCamelCase : Optional[Any] = torch_model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
_lowerCamelCase : Tuple = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_lowerCAmelCase : List[Any] = parser.parse_args()
_lowerCAmelCase : List[str] = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
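    # Illustration of the key-renaming step above. The pattern pairs here are
    # made up for demonstration; the real DECODER_PATTERNS/REMAINING_PATTERNS
    # are the tuples defined earlier in this script.
    _demo_patterns = [("self/query", "self_attn.q_proj"), ("/", ".")]
    assert rename_state_dict_key("decoder/self/query/kernel", _demo_patterns) == "decoder.self_attn.q_proj.kernel"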
| 46
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
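if __name__ == "__main__":
    # The same thin-subclass deprecation pattern works for any renamed class; a
    # self-contained sketch with made-up names (NewProcessor/OldProcessor are
    # hypothetical, not transformers classes):
    class NewProcessor:
        def __init__(self, size: int = 224):
            self.size = size

    class OldProcessor(NewProcessor):  # deprecated alias kept for backwards compatibility
        def __init__(self, *args, **kwargs):
            warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
            super().__init__(*args, **kwargs)

    proc = OldProcessor(size=256)  # emits a FutureWarning, otherwise behaves like NewProcessor
    print(proc.size)  # 256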
| 629
| 0
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by inspecting the last bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two implementations for a few input values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
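    # Sanity check: both implementations agree with Python's built-in counting
    # via bin(); int.bit_count() would also work on Python 3.10+.
    for n in (0, 1, 25, 37, 58, 2**20 - 1):
        expected = bin(n).count("1")
        assert get_set_bits_count_using_brian_kernighans_algorithm(n) == expected
        assert get_set_bits_count_using_modulo_operator(n) == expected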
| 291
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
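if __name__ == "__main__":
    # Stand-alone sketch of the size-divisibility property these tests assert:
    # GLPN-style preprocessing snaps each spatial dimension down to a multiple
    # of `size_divisor` (plain arithmetic, no transformers dependency).
    def round_down_to_multiple(value: int, divisor: int) -> int:
        return (value // divisor) * divisor

    height, width, size_divisor = 487, 641, 32
    new_h = round_down_to_multiple(height, size_divisor)
    new_w = round_down_to_multiple(width, size_divisor)
    assert new_h % size_divisor == 0 and new_w % size_divisor == 0
    print(new_h, new_w)  # 480 640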
| 291
| 1
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Pull the user dict out of the inline JSON carried by one of the page's <script> tags."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Crawls the public profile page of the given Instagram user."""

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the embedded user dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def lowercase ( __A : str = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
snake_case : List[str] = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase : int = InstagramUser('''github''')
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
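    # The `data.find(...) : -1` slice in extract_user_profile just trims a
    # JavaScript assignment down to its JSON payload. A self-contained sketch
    # with a made-up inline <script> string:
    raw = 'window._sharedData = {"config": {"csrf": "x"}, "entry_data": {"user": "github"}};'
    payload = json.loads(raw[raw.find('{"config"') : -1])  # drop the JS prefix and trailing ";"
    assert payload["entry_data"]["user"] == "github"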
| 36
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
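# To see the derived channel dimension concretely: with the defaults above,
# embed_dim doubles at each of the four stages, so the rule
# int(embed_dim * 2 ** (len(depths) - 1)) gives 96 -> 192 -> 384 -> 768.
assert int(96 * 2 ** (len([2, 2, 6, 2]) - 1)) == 768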
| 420
| 0
|
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` as one string."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 713
|
from sklearn.metrics import recall_score
import datasets
_lowercase : Any = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
_lowercase : int = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
_lowercase : Dict = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 546
| 0
|
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over one of the RFC 3526 MODP groups."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
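    # Minimal round trip with the class above: both parties derive the same
    # shared key from each other's public keys.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared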
| 677
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 161
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 477
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
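# The `args.func` dispatch above is the standard argparse subcommand pattern:
# each subparser registers a callable via set_defaults(func=...). A
# self-contained sketch with made-up names, kept as comments so importing this
# CLI module stays side-effect free:
#
#     from argparse import ArgumentParser
#
#     def greet(args):
#         print(f"hello, {args.name}")
#
#     parser = ArgumentParser("demo")
#     subparsers = parser.add_subparsers()
#     greet_parser = subparsers.add_parser("greet")
#     greet_parser.add_argument("--name", default="world")
#     greet_parser.set_defaults(func=greet)
#
#     args = parser.parse_args(["greet", "--name", "diffusers"])
#     args.func(args)  # prints "hello, diffusers"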
| 477
| 1
|
def encrypt(input_string: str, key: int) -> str:
    """Write `input_string` into a zigzag grid of `key` rows and read it off row by row."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Rebuild the zigzag grid from the ciphertext and read it back in zigzag order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict:
    """Use the decrypt function with every possible key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
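    # Quick round trip through the three functions above.
    ciphertext = encrypt("HELLO WORLD", 4)
    print(ciphertext)  # "HWE OLORDLL" -- the 4-row zigzag grid read off row by row
    assert decrypt(ciphertext, 4) == "HELLO WORLD"
    assert bruteforce(ciphertext)[4] == "HELLO WORLD"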
| 635
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
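    # Typical launches (command lines are illustrative; any accelerate-supported
    # multi-process launch works the same way):
    #   python this_script.py --local_sgd_steps 8
    #   accelerate launch this_script.py --gradient_accumulation_steps 2 --local_sgd_steps 8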
| 273
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _a ( metaclass=lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_) -> str:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
class _a ( metaclass=lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> int:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
class _a ( metaclass=lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_) -> int:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> int:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
class _a ( metaclass=lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_) -> Tuple:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> str:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
class _a ( metaclass=lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
class _a ( metaclass=lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_) -> str:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
@classmethod
def __lowercase ( cls , *UpperCAmelCase_ , **UpperCAmelCase_) -> str:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"])
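# Illustrative note (not in the original file): these placeholder classes exist so that importing the
# package succeeds even when the optional `torch`/`transformers`/`onnx` backends are absent; any
# attempt to instantiate one or call a classmethod raises a clear error via `requires_backends`
# instead of an opaque ModuleNotFoundError at import time.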
| 708
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ = ConsistencyModelPipeline
UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
UpperCamelCase__ = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
lowercase__: List[str] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def __lowercase ( self) -> Tuple:
'''simple docstring'''
lowercase__: Any = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def __lowercase ( self , UpperCAmelCase_=False) -> str:
'''simple docstring'''
if class_cond:
lowercase__: List[str] = self.dummy_cond_unet
else:
lowercase__: List[Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
lowercase__: List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
lowercase__: Any = {
"unet": unet,
"scheduler": scheduler,
}
return components
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_=0) -> Union[str, Any]:
'''simple docstring'''
if str(UpperCAmelCase_).startswith("mps"):
lowercase__: List[str] = torch.manual_seed(UpperCAmelCase_)
else:
lowercase__: Optional[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowercase__: Optional[Any] = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def __lowercase ( self) -> Tuple:
'''simple docstring'''
lowercase__: Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: List[str] = ConsistencyModelPipeline(**UpperCAmelCase_)
lowercase__: List[str] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowercase__: Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
lowercase__: Optional[int] = pipe(**UpperCAmelCase_).images
assert image.shape == (1, 32, 32, 3)
lowercase__: List[str] = image[0, -3:, -3:, -1]
lowercase__: Union[str, Any] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def __lowercase ( self) -> Any:
'''simple docstring'''
lowercase__: Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__: Optional[Any] = self.get_dummy_components(class_cond=UpperCAmelCase_)
lowercase__: List[str] = ConsistencyModelPipeline(**UpperCAmelCase_)
lowercase__: Union[str, Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowercase__: Optional[int] = self.get_dummy_inputs(UpperCAmelCase_)
lowercase__: Optional[int] = 0
lowercase__: Union[str, Any] = pipe(**UpperCAmelCase_).images
assert image.shape == (1, 32, 32, 3)
lowercase__: Optional[int] = image[0, -3:, -3:, -1]
lowercase__: Optional[int] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def __lowercase ( self) -> str:
'''simple docstring'''
lowercase__: Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__: str = self.get_dummy_components()
lowercase__: Optional[int] = ConsistencyModelPipeline(**UpperCAmelCase_)
lowercase__: List[str] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowercase__: str = self.get_dummy_inputs(UpperCAmelCase_)
lowercase__: Any = 1
lowercase__: str = None
lowercase__: Dict = pipe(**UpperCAmelCase_).images
assert image.shape == (1, 32, 32, 3)
lowercase__: List[Any] = image[0, -3:, -3:, -1]
lowercase__: int = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def __lowercase ( self) -> str:
'''simple docstring'''
lowercase__: List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__: List[str] = self.get_dummy_components(class_cond=UpperCAmelCase_)
lowercase__: Any = ConsistencyModelPipeline(**UpperCAmelCase_)
lowercase__: int = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowercase__: Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
lowercase__: Optional[int] = 1
lowercase__: Tuple = None
lowercase__: int = 0
lowercase__: str = pipe(**UpperCAmelCase_).images
assert image.shape == (1, 32, 32, 3)
lowercase__: Tuple = image[0, -3:, -3:, -1]
lowercase__: Union[str, Any] = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self , UpperCAmelCase_=0 , UpperCAmelCase_=False , UpperCAmelCase_="cpu" , UpperCAmelCase_=torch.floataa , UpperCAmelCase_=(1, 3, 64, 64)) -> List[Any]:
'''simple docstring'''
lowercase__: Any = torch.manual_seed(UpperCAmelCase_)
lowercase__: Union[str, Any] = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
lowercase__: Dict = self.get_fixed_latents(seed=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ , shape=UpperCAmelCase_)
lowercase__: Tuple = latents
return inputs
def __lowercase ( self , UpperCAmelCase_=0 , UpperCAmelCase_="cpu" , UpperCAmelCase_=torch.floataa , UpperCAmelCase_=(1, 3, 64, 64)) -> Tuple:
'''simple docstring'''
if type(UpperCAmelCase_) == str:
lowercase__: Optional[int] = torch.device(UpperCAmelCase_)
lowercase__: Any = torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowercase__: Optional[int] = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_)
return latents
def __lowercase ( self) -> int:
'''simple docstring'''
lowercase__: int = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
lowercase__: int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
lowercase__: List[Any] = ConsistencyModelPipeline(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_)
pipe.to(torch_device=UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowercase__: Any = self.get_inputs()
lowercase__: Any = pipe(**UpperCAmelCase_).images
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = image[0, -3:, -3:, -1]
lowercase__: Tuple = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
lowercase__: Tuple = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
lowercase__: Dict = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
lowercase__: str = ConsistencyModelPipeline(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_)
pipe.to(torch_device=UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowercase__: List[str] = self.get_inputs()
lowercase__: Optional[int] = 1
lowercase__: int = None
lowercase__: List[Any] = pipe(**UpperCAmelCase_).images
assert image.shape == (1, 64, 64, 3)
lowercase__: int = image[0, -3:, -3:, -1]
lowercase__: List[Any] = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
@require_torch_a
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
lowercase__: Dict = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
lowercase__: List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
lowercase__: Dict = ConsistencyModelPipeline(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_)
pipe.to(torch_device=UpperCAmelCase_ , torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowercase__: List[str] = self.get_inputs(get_fixed_latents=UpperCAmelCase_ , device=UpperCAmelCase_)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCAmelCase_ , enable_math=UpperCAmelCase_ , enable_mem_efficient=UpperCAmelCase_):
lowercase__: Union[str, Any] = pipe(**UpperCAmelCase_).images
assert image.shape == (1, 64, 64, 3)
lowercase__: Union[str, Any] = image[0, -3:, -3:, -1]
lowercase__: Any = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@require_torch_a
def __lowercase ( self) -> Any:
'''simple docstring'''
lowercase__: int = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2")
lowercase__: Dict = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
lowercase__: List[Any] = ConsistencyModelPipeline(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_)
pipe.to(torch_device=UpperCAmelCase_ , torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowercase__: List[Any] = self.get_inputs(get_fixed_latents=UpperCAmelCase_ , device=UpperCAmelCase_)
lowercase__: Optional[Any] = 1
lowercase__: Dict = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCAmelCase_ , enable_math=UpperCAmelCase_ , enable_mem_efficient=UpperCAmelCase_):
lowercase__: str = pipe(**UpperCAmelCase_).images
assert image.shape == (1, 64, 64, 3)
lowercase__: Any = image[0, -3:, -3:, -1]
lowercase__: Any = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
| 120
| 0
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__snake_case :Tuple =HfArgumentParser(InitializationArguments)
__snake_case :Optional[int] =parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__snake_case :int =AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__snake_case :Optional[int] ={
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
__snake_case :Optional[Any] =AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__snake_case :List[Any] =AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
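# Usage sketch (assumed, not part of the original script; the flag names come from the `args.*`
# attributes used above, the script filename is hypothetical):
#   python initialize_model.py --config_name gpt2-large --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-large --push_to_hub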
| 106
|
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Union[str, Any] )-> Optional[Any]:
snake_case = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case = shift_tokens_right(__snake_case , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case = model(__snake_case , decoder_input_ids=__snake_case ).logits
snake_case = optax.softmax_cross_entropy(__snake_case , onehot(__snake_case , logits.shape[-1] ) ).mean()
snake_case = -(labels.shape[-1] * loss.item())
snake_case = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
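# Note (not in the original): the check above converts the mean per-token cross-entropy back into a
# sequence-level log-likelihood by rescaling with the target length, so the score can be compared
# against a reference value computed by a sequence-level implementation.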
| 369
| 0
|
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1: any 6k, 6k +/- 2 is even and any
    # 6k + 3 is divisible by 3, so only the two remaining residues need to be trial-divided.
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 1_0_0_0_1) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
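    # Hypothetical quick check (not in the original): the first and sixth primes.
    assert solution(1) == 2 and solution(6) == 13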
| 707
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
snake_case__ : Optional[Any] = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
snake_case__ : List[str] = """patrickvonplaten/t5-tiny-random"""
snake_case__ : int = """sshleifer/bart-tiny-random"""
snake_case__ : Union[str, Any] = """sshleifer/tiny-mbart"""
snake_case__ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _A ( _lowercase ):
'''simple docstring'''
def _snake_case ( self : str , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(lowerCamelCase , lowerCamelCase )
__lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
run_generate()
assert Path(lowerCamelCase ).exists()
# os.remove(Path(output_file_name))
def _snake_case ( self : Dict ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
self.run_eval_tester(lowerCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
__lowercase = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
__lowercase = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
__lowercase = Path(self.get_auto_remove_tmp_dir() )
__lowercase = str(tmp_dir / "scores.json" )
__lowercase = str(tmp_dir / "val.target" )
_dump_articles(lowerCamelCase , text["en"] )
_dump_articles(lowerCamelCase , text["de"] )
__lowercase = "translation_en_to_de" if model == T5_TINY else "summarization"
__lowercase = f"""
run_eval_search.py
{model}
{str(lowerCamelCase )}
{str(lowerCamelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
with CaptureStdout() as cs:
run_search()
__lowercase = [" num_beams | length_penalty", model, "Best score args"]
__lowercase = ["Info"]
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(lowerCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowerCamelCase ).exists()
os.remove(Path(lowerCamelCase ) )
| 655
| 0
|
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1 (logical OR), else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exercise the full truth table of the OR gate."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
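    # Hypothetical addition (not in the original): run the truth-table assertions as a self-check.
    test_or_gate()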
| 275
|
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
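    # Hypothetical usage sketch (not in the original): in demo graph 0, the edges (2, 3), (3, 4)
    # and (2, 5) each disconnect the graph when removed, so they are the reported bridges.
    print(compute_bridges(get_demo_graph(0)))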
| 523
| 0
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
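    # Hypothetical usage example (not in the original):
    print(bubble_sort([0, 5, 2, 3, 2]))  # -> [0, 2, 2, 3, 5]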
| 17
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for a, b in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
A : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__lowerCamelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Union[str, Any] = None
ops.enable_eager_execution_internal()
A : Tuple = tf.config.list_physical_devices("CPU" )
if len(__lowerCamelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
A : Dict = tf.config.list_logical_devices(device_type="CPU" )
A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
A : Optional[int] = GradientAccumulator()
A : Tuple = tf.Variable([4.0, 3.0] )
A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 )
A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase )
def accumulate_on_replica(__lowerCamelCase : Tuple ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
with strategy.scope():
A : int = strategy.experimental_local_results(__lowerCamelCase )
local_variables[0].assign(__lowerCamelCase )
local_variables[1].assign(__lowerCamelCase )
strategy.run(__lowerCamelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__lowerCamelCase )
def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
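# Illustrative note (not in the original): the contract exercised above is that calling the
# accumulator N times sums gradients element-wise ([1, 2] + [-2, 1] + [-1, 2] == [-2, 5]), `step`
# counts the calls, and `reset()` zeroes both, so `apply_gradients` always sees the running sum.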
| 17
| 1
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _UpperCamelCase ( self ) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def _UpperCamelCase ( self ) -> int:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
snake_case_ = 'A red cat sitting on a park bench'
snake_case_ = np.random.RandomState(0 )
snake_case_ = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_SCREAMING_SNAKE_CASE , output_type='np' , )
snake_case_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 198
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a_ ( lowerCamelCase ):
lowercase = ["""image_processor""", """tokenizer"""]
lowercase = """ViltImageProcessor"""
lowercase = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _SCREAMING_SNAKE_CASE , )
UpperCamelCase = kwargs.pop("""feature_extractor""" )
UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self.image_processor
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
"""simple docstring"""
UpperCamelCase = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# add pixel_values + pixel_mask
UpperCamelCase = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
encoding.update(_SCREAMING_SNAKE_CASE )
return encoding
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A__ ( self ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _SCREAMING_SNAKE_CASE , )
return self.image_processor
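# Note (not in the original): the processor is a thin facade that fans one call out to the tokenizer
# (producing input_ids etc.) and the image processor (producing pixel_values and pixel_mask) and
# merges the two encodings, so callers get a single preprocessing entry point for image-text pairs.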
| 301
| 0
|
UpperCAmelCase__ = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 718
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
UpperCAmelCase__ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ : int = VOCAB_FILES_NAMES
UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Optional[int] = ['input_ids', 'attention_mask']
UpperCamelCase_ : Optional[int] = MBartTokenizer
UpperCamelCase_ : List[int] = []
UpperCamelCase_ : List[int] = []
def __init__( self : Tuple , lowerCamelCase__ : str=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[Any]="<s>" , lowerCamelCase__ : int="</s>" , lowerCamelCase__ : int="</s>" , lowerCamelCase__ : List[Any]="<s>" , lowerCamelCase__ : Union[str, Any]="<unk>" , lowerCamelCase__ : Union[str, Any]="<pad>" , lowerCamelCase__ : List[Any]="<mask>" , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Dict=None , **lowerCamelCase__ : List[str] , ) -> Tuple:
"""simple docstring"""
__lowercase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
vocab_file=lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
__lowercase = vocab_file
__lowercase = False if not self.vocab_file else True
__lowercase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__lowercase = {
lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__lowercase = src_lang if src_lang is not None else '''en_XX'''
__lowercase = self.convert_tokens_to_ids(self._src_lang )
__lowercase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase_ ( self : Any ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCAmelCase_ ( self : Optional[Any] , lowerCamelCase__ : str ) -> None:
"""simple docstring"""
__lowercase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] , lowerCamelCase__ : Optional[str] , **lowerCamelCase__ : List[str] ) -> Optional[int]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__lowercase = src_lang
__lowercase = self(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
__lowercase = self.convert_tokens_to_ids(lowerCamelCase__ )
__lowercase = tgt_lang_id
return inputs
def UpperCAmelCase_ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str = "en_XX" , lowerCamelCase__ : Optional[List[str]] = None , lowerCamelCase__ : str = "ro_RO" , **lowerCamelCase__ : Union[str, Any] , ) -> BatchEncoding:
"""simple docstring"""
__lowercase = src_lang
__lowercase = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> Dict:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : Optional[Any] ) -> None:
"""simple docstring"""
__lowercase = self.convert_tokens_to_ids(lowerCamelCase__ )
__lowercase = []
__lowercase = [self.eos_token_id, self.cur_lang_code]
__lowercase = self.convert_ids_to_tokens(self.prefix_tokens )
__lowercase = self.convert_ids_to_tokens(self.suffix_tokens )
__lowercase = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase_ ( self : Tuple , lowerCamelCase__ : str ) -> None:
"""simple docstring"""
__lowercase = self.convert_tokens_to_ids(lowerCamelCase__ )
__lowercase = []
__lowercase = [self.eos_token_id, self.cur_lang_code]
__lowercase = self.convert_ids_to_tokens(self.prefix_tokens )
__lowercase = self.convert_ids_to_tokens(self.suffix_tokens )
__lowercase = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
__lowercase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
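# Note (not in the original): with an empty prefix and `suffix_tokens = [eos, lang_code]`, MBart
# sequences are serialized as `tokens </s> <lang_code>` on both the source and target sides, which
# is exactly the template the two `set_*_lang_special_tokens` helpers above install via
# TemplateProcessing.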
| 362
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowercase_ = (3, 9, -11, 0, 7, 5, 1, -1)
lowercase_ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
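    # Hypothetical note (not in the original): both inputs are re-sorted on construction, so the
    # merged list prints ascending:
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10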
| 11
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
_lowerCamelCase = logging.get_logger(__name__)
# General docstring
_lowerCamelCase = 'PoolFormerConfig'
# Base docstring
_lowerCamelCase = 'sail/poolformer_s12'
_lowerCamelCase = [1, 512, 7, 7]
# Image classification docstring
_lowerCamelCase = 'sail/poolformer_s12'
_lowerCamelCase = 'tabby, tabby cat'
_lowerCamelCase = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ):
if drop_prob == 0.0 or not training:
return input
SCREAMING_SNAKE_CASE__ = 1 - drop_prob
SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor
return output
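# Illustrative note (not in the original): this is stochastic depth applied per sample. With
# drop_prob = 0.5 in training mode, roughly half of the samples in a batch get this residual branch
# zeroed, while survivors are rescaled by 1 / keep_prob so the expected activation is unchanged.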
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = drop_prob
def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
return drop_path(__A , self.drop_prob , self.training )
def _snake_case ( self :Dict ) -> str:
"""simple docstring"""
return "p={}".format(self.drop_prob )
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size)
SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride)
SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding)
SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A )
SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity()
def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.projection(__A )
SCREAMING_SNAKE_CASE__ = self.norm(__A )
return embeddings
class UpperCamelCase_ ( nn.GroupNorm ):
def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict:
"""simple docstring"""
super().__init__(1 , __A , **__A )
class UpperCamelCase_ ( nn.Module ):
def __init__( self :List[str] , __A :Optional[int] ) -> Any:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A )
def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.pool(__A ) - hidden_states
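# Note (not in the original): PoolFormer's token mixer is plain average pooling; returning
# `pool(x) - x` here means that, once the surrounding residual connection adds `x` back, the block
# effectively computes `pool(x)` (up to layer scale and drop path) while keeping the residual form.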
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , __A :Any ) -> str:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 )
SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 )
SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A )
if isinstance(config.hidden_act , __A ):
SCREAMING_SNAKE_CASE__ = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE__ = config.hidden_act
def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.conva(__A )
SCREAMING_SNAKE_CASE__ = self.act_fn(__A )
SCREAMING_SNAKE_CASE__ = self.drop(__A )
SCREAMING_SNAKE_CASE__ = self.conva(__A )
SCREAMING_SNAKE_CASE__ = self.drop(__A )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A )
SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A )
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A )
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A )
# Useful for training neural nets
SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity()
SCREAMING_SNAKE_CASE__ = config.use_layer_scale
if config.use_layer_scale:
SCREAMING_SNAKE_CASE__ = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str:
"""simple docstring"""
if self.use_layer_scale:
SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) )
SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A )
SCREAMING_SNAKE_CASE__ = ()
SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) )
SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A )
SCREAMING_SNAKE_CASE__ = (output,) + outputs
return outputs
else:
SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) )
# First residual connection
SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states
SCREAMING_SNAKE_CASE__ = ()
# Second residual connection inside the PoolFormerOutput block
SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) )
SCREAMING_SNAKE_CASE__ = hidden_states + layer_output
SCREAMING_SNAKE_CASE__ = (output,) + outputs
return outputs
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = config
# stochastic depth decay rule
SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
SCREAMING_SNAKE_CASE__ = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A )
# Transformer blocks
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
SCREAMING_SNAKE_CASE__ = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__A ) )
SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A )
def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None
SCREAMING_SNAKE_CASE__ = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers
# Get patch embeddings from hidden_states
SCREAMING_SNAKE_CASE__ = embedding_layer(__A )
# Send the embeddings through the blocks
for _, blk in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = blk(__A )
SCREAMING_SNAKE_CASE__ = layer_outputs[0]
if output_hidden_states:
SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A )
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = PoolFormerConfig
lowerCamelCase_ = "poolformer"
lowerCamelCase_ = "pixel_values"
lowerCamelCase_ = True
def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict:
"""simple docstring"""
if isinstance(__A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any:
"""simple docstring"""
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = value
_lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , __A :Any ) -> int:
"""simple docstring"""
super().__init__(__A )
SCREAMING_SNAKE_CASE__ = config
SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A )
# Initialize weights and apply final processing
self.post_init()
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
SCREAMING_SNAKE_CASE__ = self.encoder(
__A , output_hidden_states=__A , return_dict=__A , )
SCREAMING_SNAKE_CASE__ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , )
class UpperCamelCase_ ( nn.Module ):
def __init__( self :int , __A :Optional[int] ) -> Tuple:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size )
def _snake_case ( self :List[Any] , __A :Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.dense(__A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , )
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :str , __A :Union[str, Any] ) -> int:
"""simple docstring"""
super().__init__(__A )
SCREAMING_SNAKE_CASE__ = config.num_labels
SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A )
# Final norm
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
SCREAMING_SNAKE_CASE__ = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
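
# --- Usage sketch (added for illustration; not part of the original file) ---
# The checkpoint name below is an assumption; any PoolFormer checkpoint on the
# Hub should work the same way.
#
#   from transformers import AutoImageProcessor, PoolFormerForImageClassification
#   import torch
#
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=image, return_tensors="pt")  # `image` is any PIL image
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])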
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
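
# How to run these tests locally (a sketch; the path assumes the transformers
# repo layout):
#
#   RUN_SLOW=1 python -m pytest tests/models/deit/test_modeling_tf_deit.py
#
# Without RUN_SLOW=1, the @slow integration test above is skipped.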
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches=10, n_valid_batches=2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))
    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model to do y=mx+b"""
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))
    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        """simple docstring"""
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler)
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
@require_cuda
    def test_map_location(self):
        """simple docstring"""
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
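
# Launch note (a sketch; the file name is an assumption): the __main__ block
# above is meant to run under a distributed launcher so that
# `accelerator.process_index` and the wait_for_everyone() barriers are
# meaningful, e.g.
#
#   torchrun --nproc_per_node=2 test_state_checkpointing.py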
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
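
# Usage sketch (hypothetical file names; `fire` maps the CLI arguments onto
# calculate_rouge_path's parameters):
#
#   python rouge_cli.py preds.txt targets.txt --save_path=metrics.json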
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean), (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean), (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int) -> np.ndarray:
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes), covariance_within_classes(features, labels, classes), )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes")
        assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
    import doctest
    doctest.testmod()
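
# A small worked example (added for illustration): rows are features, columns
# are samples, matching the functions above.
#
#   features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
#   reduced = principal_component_analysis(features, dimensions=1)
#   print(reduced.shape)  # (1, 3): three samples projected onto one component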
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.')
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
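
# Usage sketch (hypothetical file name and model object; not part of the
# original file):
#
#   import torch
#   pt_state_dict = torch.load("pytorch_model.bin", map_location="cpu")
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
#
# `flax_model` is any Flax model exposing `init_weights(rng)`; the returned
# nested dict can be passed as the module's `params`.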
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
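
# Effect of the _LazyModule above (illustration; the checkpoint name is an
# assumption):
#
#   from transformers import LongT5ForConditionalGeneration  # resolved lazily on first access
#   model = LongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
#
# Heavy torch/flax submodules are only imported when one of their exported
# names is actually requested.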
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()
    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()
    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()
    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy")
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy")
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
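
# Usage sketch mirroring the integration tests above (slow; downloads the
# Stable Diffusion weights and needs a GPU):
#
#   scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
#   pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler).to("cuda")
#   out = pipe(prompt="A blue colored car", source_prompt="A black colored car",
#              image=init_image, num_inference_steps=100, strength=0.85, eta=0.1)
#   out.images[0]  # the recolored car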
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "bridgetower_vision_model"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "bridgetower_text_model"
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "bridgetower"
    def __init__(self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs, ):
        # TODO: remove this once the Hub files are updated.
        kwargs.pop("text_config_dict", None)
        kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)
    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
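
# Usage sketch (added for illustration): build the composite config from its
# two sub-configs and round-trip it through to_dict().
#
#   text_cfg = BridgeTowerTextConfig(num_hidden_layers=6)
#   vision_cfg = BridgeTowerVisionConfig(num_hidden_layers=6)
#   config = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert config.to_dict()["text_config"]["num_hidden_layers"] == 6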
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    '''simple docstring'''
    def setUp(self) -> str:
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"), os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"), )
    def tearDown(self) -> Dict:
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None) -> List[str]:
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self) -> Any:
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self) -> int:
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE + "\n", )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", re.sub("DDPM", "Test", REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""", f"""{long_class_name}SchedulerOutput""", re.sub("DDPM", long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", REFERENCE_CODE, overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE), )
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present(transformers_path):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
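
# Invocation sketch (the script name is an assumption; run from the repo root):
#
#   python check_build.py              # inspects build/lib/transformers
#   python check_build.py --check_lib  # inspects the installed package instead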
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    '''Compute pi to `precision` digits with the Chudnovsky algorithm.'''
    if not isinstance(precision, int):
        raise TypeError('Undefined for non-integers')
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers')
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
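
# Quick sanity check (a sketch; output follows from the algorithm above, which
# gains roughly 14 correct digits per Chudnovsky term, hence ceil(precision / 14)
# iterations):
#
#   >>> pi(10)
#   '3.14159265'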
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=lowercase__ , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=lowercase__ , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=lowercase__ , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=lowercase__ , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=lowercase__ , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=lowercase__ , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=lowercase__ , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=lowercase__ , default=2**1_8 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=lowercase__ , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=lowercase__ , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=lowercase__ , default=1E-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=lowercase__ , default=1E-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=lowercase__ , default=5_1_2 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=lowercase__ , default=0.15 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=lowercase__ , required=lowercase__ , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=lowercase__ , help="Model ID to upload to on the Hugging Face Hub." )
    args = parser.parse_args()
return args
def initialize_tpu(args):
'''simple docstring'''
try:
if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
return tpu
def count_samples(file_list):
    '''simple docstring'''
    num_samples = 0
    for file in file_list:
        file_name = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", file_name).group(1)
        sample_count = int(sample_count)
num_samples += sample_count
return num_samples
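# Illustration of the shard-name convention parsed above (the filename is a
# hypothetical example): the record count is encoded after the final dash.
# >>> re.search(r"-\d+-(\d+)\.tfrecord", "wiki-00001-4096.tfrecord").group(1)
# '4096'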
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    '''simple docstring'''
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=shuffle)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
return dataset
def main(args):
    '''simple docstring'''
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])
    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")
    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
__lowercase : Union[str, Any] =parse_args()
main(args)
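# Example invocation (a sketch; the file name, bucket paths and TPU name are
# placeholders, not values defined by this script):
#
#   python run_mlm.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --output_dir gs://my-bucket/checkpoints \
#       --tpu_name local --bfloat16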
| 54
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : List[Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Any:
_UpperCAmelCase =os.path.abspath(_lowerCamelCase )
logger.info(F"Converting TensorFlow checkpoint from {tf_path}" )
# Load weights from TF model
_UpperCAmelCase =tf.train.list_variables(_lowerCamelCase )
_UpperCAmelCase =[]
_UpperCAmelCase =[]
_UpperCAmelCase =[]
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_UpperCAmelCase =full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(F"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
_UpperCAmelCase =name[1:]
# figure out how many levels deep the name is
_UpperCAmelCase =0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(_lowerCamelCase )
# read data
_UpperCAmelCase =tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
names.append("/".join(_lowerCamelCase ) )
arrays.append(_lowerCamelCase )
logger.info(F"Read a total of {len(_lowerCamelCase ):,} layers" )
# Sanity check
if len(set(_lowerCamelCase ) ) != 1:
raise ValueError(F"Found layer names with different depths (layer depth {list(set(_lowerCamelCase ) )})" )
_UpperCAmelCase =list(set(_lowerCamelCase ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
for full_name, array in zip(_lowerCamelCase , _lowerCamelCase ):
_UpperCAmelCase =full_name.split("/" )
_UpperCAmelCase =model
_UpperCAmelCase =[]
for i, m_name in enumerate(_lowerCamelCase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
_UpperCAmelCase =int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
_UpperCAmelCase =getattr(_lowerCamelCase , "embeddings" )
_UpperCAmelCase =getattr(_lowerCamelCase , "LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
_UpperCAmelCase =getattr(_lowerCamelCase , "encoder" )
_UpperCAmelCase =getattr(_lowerCamelCase , "layer" )
_UpperCAmelCase =pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
_UpperCAmelCase =getattr(_lowerCamelCase , "pooler" )
_UpperCAmelCase =getattr(_lowerCamelCase , "dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
_UpperCAmelCase =getattr(_lowerCamelCase , "embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
_UpperCAmelCase =getattr(_lowerCamelCase , "word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
_UpperCAmelCase =getattr(_lowerCamelCase , "position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
_UpperCAmelCase =getattr(_lowerCamelCase , "token_type_embeddings" )
else:
raise ValueError(F"Unknown embedding layer with name {full_name}" )
trace.append("weight" )
_UpperCAmelCase =getattr(_lowerCamelCase , "weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
_UpperCAmelCase =getattr(_lowerCamelCase , "attention" )
_UpperCAmelCase =getattr(_lowerCamelCase , "self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
_UpperCAmelCase =getattr(_lowerCamelCase , "attention" )
_UpperCAmelCase =getattr(_lowerCamelCase , "output" )
_UpperCAmelCase =getattr(_lowerCamelCase , "LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
_UpperCAmelCase =getattr(_lowerCamelCase , "attention" )
_UpperCAmelCase =getattr(_lowerCamelCase , "output" )
_UpperCAmelCase =getattr(_lowerCamelCase , "dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
_UpperCAmelCase =getattr(_lowerCamelCase , "output" )
_UpperCAmelCase =getattr(_lowerCamelCase , "dense" )
elif m_name == "_output_layer_norm":
            # embedding LayerNorm is handled above; this branch handles the output LayerNorm
trace.extend(["output", "LayerNorm"] )
_UpperCAmelCase =getattr(_lowerCamelCase , "output" )
_UpperCAmelCase =getattr(_lowerCamelCase , "LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
_UpperCAmelCase =getattr(_lowerCamelCase , "key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
_UpperCAmelCase =getattr(_lowerCamelCase , "query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
_UpperCAmelCase =getattr(_lowerCamelCase , "value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
_UpperCAmelCase =getattr(_lowerCamelCase , "intermediate" )
_UpperCAmelCase =getattr(_lowerCamelCase , "dense" )
            elif m_name == "_output_layer_norm":
                # output layer norm (note: unreachable, the identical condition above already matches)
trace.append("output" )
_UpperCAmelCase =getattr(_lowerCamelCase , "output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
_UpperCAmelCase =getattr(_lowerCamelCase , "bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
_UpperCAmelCase =getattr(_lowerCamelCase , "weight" )
else:
logger.warning(F"Ignored {m_name}" )
# for certain layers reshape is necessary
_UpperCAmelCase =".".join(_lowerCamelCase )
if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , _lowerCamelCase ) or re.match(
r"(\S+)\.attention\.output\.dense\.weight" , _lowerCamelCase ):
_UpperCAmelCase =array.reshape(pointer.data.shape )
if "kernel" in full_name:
_UpperCAmelCase =array.transpose()
if pointer.shape == array.shape:
_UpperCAmelCase =torch.from_numpy(_lowerCamelCase )
else:
raise ValueError(
F"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
F" {array.shape}" )
logger.info(F"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(F"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(F"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    logger.info(F"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
snake_case__ : List[str] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 592
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"
    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
_UpperCAmelCase =self.get_tokenizer()
_UpperCAmelCase =BarkProcessor(tokenizer=_snake_case )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase =BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
    def test_save_load_pretrained_additional_features(self):
_UpperCAmelCase =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_UpperCAmelCase =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase =BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings(self):
_UpperCAmelCase =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_UpperCAmelCase =35
_UpperCAmelCase =2
_UpperCAmelCase =8
_UpperCAmelCase ={
"semantic_prompt": np.ones(_snake_case ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_UpperCAmelCase =processor(text=self.input_string , voice_preset=_snake_case )
_UpperCAmelCase =inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_snake_case , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_UpperCAmelCase =os.path.join(self.tmpdirname , "file.npz" )
np.savez(_snake_case , **_snake_case )
_UpperCAmelCase =processor(text=self.input_string , voice_preset=_snake_case )
_UpperCAmelCase =inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_snake_case , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_UpperCAmelCase =processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer(self):
_UpperCAmelCase =self.get_tokenizer()
_UpperCAmelCase =BarkProcessor(tokenizer=_snake_case )
_UpperCAmelCase =processor(text=self.input_string )
_UpperCAmelCase =tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
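# Minimal usage sketch for the processor exercised above (the checkpoint and
# voice preset names follow the test's own setup; output keys are taken from
# the assertions):
#
#   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#   inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
#   # `inputs` holds the tokenized text plus a "history_prompt" dict with
#   # "semantic_prompt", "coarse_prompt" and "fine_prompt" arrays.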
| 592
| 1
|
"""simple docstring"""
def min_path_sum(grid):
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""")
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row, row_above):
current_row[0] += row_above[0]
for cell_n in range(1 ,len(_snake_case ) ):
current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] )
return current_row
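# Worked example (added for illustration): the cheapest top-left to
# bottom-right path in the grid below is 1 -> 3 -> 1 -> 1 -> 1, total 7.
# >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
# 7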
if __name__ == "__main__":
import doctest
doctest.testmod()
| 223
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class Data2VecVisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''data2vec-vision'''
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''')
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation(self) -> float:
"""simple docstring"""
return 1E-4
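# Usage sketch (illustrative; relies only on the two classes defined above):
#
#   config = Data2VecVisionConfig(image_size=224, patch_size=16)
#   onnx_config = Data2VecVisionOnnxConfig(config)
#   onnx_config.inputs               # OrderedDict mapping "pixel_values" to its axes
#   onnx_config.atol_for_validation  # 1e-4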
| 223
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ : Union[str, Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
a_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
'''simple docstring'''
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1E-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Optional[Any] = tf.zeros_like(inputs_dict['attention_mask'] )
__lowerCamelCase : Any = 2
__lowerCamelCase : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Tuple = self.model_tester.seq_length
__lowerCamelCase : Dict = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a ):
__lowerCamelCase : List[Any] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a ):
__lowerCamelCase : List[str] = [t.numpy() for t in outputs.encoder_attentions]
__lowerCamelCase : Optional[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__lowerCamelCase : int = True
__lowerCamelCase : int = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : List[Any] = model_class(__a )
__lowerCamelCase : Optional[Any] = model(self._prepare_for_class(__a , __a ) )
__lowerCamelCase : Tuple = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowerCamelCase : Any = model_class(__a )
__lowerCamelCase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCamelCase : Tuple = True
__lowerCamelCase : Dict = model_class(__a )
__lowerCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowerCamelCase : List[Any] = True
__lowerCamelCase : int = True
__lowerCamelCase : List[str] = model_class(__a )
__lowerCamelCase : Dict = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
        # TODO: Head-masking not yet implemented
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
a_ : Tuple = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
def snake_case_ ( self ):
__lowerCamelCase : Tuple = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
__lowerCamelCase : Union[str, Any] = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__lowerCamelCase : Dict = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__lowerCamelCase : Optional[int] = prepare_led_inputs_dict(model.config , __a , __a )
__lowerCamelCase : str = model(**__a )[0]
__lowerCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
__lowerCamelCase : int = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-3 )
def snake_case_ ( self ):
__lowerCamelCase : List[str] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
__lowerCamelCase : Union[str, Any] = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__lowerCamelCase : Dict = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__lowerCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
__lowerCamelCase : Dict = model(**__a )[0]
__lowerCamelCase : List[Any] = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
__lowerCamelCase : Optional[Any] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-3 , rtol=1E-3 )
| 263
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    '''simple docstring'''
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(F"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(F"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
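# Usage sketch (illustrative; the "audiofolder" loader, data directory and the
# dataset's column names are assumptions for the example):
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="folder/", split="train")
#   template = AudioClassification(audio_column="audio", label_column="label")
#   template = template.align_with_features(ds.features)
#   template.column_mapping  # {"audio": "audio", "label": "labels"}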
| 520
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 520
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ : Dict = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Dict = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 709
|
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    '''simple docstring'''
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = {"""a""", """b""", """c""", """d""", """e"""}
lowerCAmelCase_ : Dict = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
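# Expected output for the sets above (added for illustration): the
# intersection is {"c", "d", "e"} (3 elements) and the union has 8 elements,
# so the script prints 3 / 8 = 0.375.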
| 204
| 0
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_lowerCAmelCase = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
_lowerCAmelCase = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
_lowerCAmelCase = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
_lowerCAmelCase = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
_lowerCAmelCase = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def _snake_case ( __snake_case , __snake_case ):
for tf_name, hf_name in patterns:
_UpperCamelCase = k.replace(__snake_case , __snake_case )
return k
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = BigBirdPegasusConfig(**__snake_case )
_UpperCamelCase = BigBirdPegasusForConditionalGeneration(__snake_case )
_UpperCamelCase = torch_model.state_dict()
_UpperCamelCase = {}
# separating decoder weights
_UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
_UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
_UpperCamelCase = [k.endswith(__snake_case ) for ending in KEYS_TO_IGNORE]
if any(__snake_case ):
continue
_UpperCamelCase = DECODER_PATTERNS
_UpperCamelCase = rename_state_dict_key(__snake_case , __snake_case )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value''']):
_UpperCamelCase = v.T
_UpperCamelCase = torch.from_numpy(__snake_case )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
_UpperCamelCase = [k.endswith(__snake_case ) for ending in KEYS_TO_IGNORE]
if any(__snake_case ):
continue
_UpperCamelCase = REMAINING_PATTERNS
_UpperCamelCase = rename_state_dict_key(__snake_case , __snake_case )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value''']):
_UpperCamelCase = v.T
_UpperCamelCase = torch.from_numpy(__snake_case )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
_UpperCamelCase = mapping['''model.embed_positions.weight''']
_UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
_UpperCamelCase , _UpperCamelCase = torch_model.load_state_dict(__snake_case , strict=__snake_case )
_UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def _snake_case ( __snake_case ):
_UpperCamelCase = tf.train.list_variables(__snake_case )
_UpperCamelCase = {}
_UpperCamelCase = ['''global_step''']
for name, shape in tqdm(__snake_case , desc='''converting tf checkpoint to dict''' ):
_UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCamelCase = tf.train.load_variable(__snake_case , __snake_case )
_UpperCamelCase = array
return tf_weights
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = get_tf_weights_as_numpy(__snake_case )
_UpperCamelCase = convert_bigbird_pegasus(__snake_case , __snake_case )
torch_model.save_pretrained(__snake_case )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 10
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a, input_b):
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset, value_array):
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('''Wrong shape''')
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a, input_b):
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
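# Worked example (added for illustration): for each query vector the nearest
# dataset row under Euclidean distance is returned with its distance.
# >>> dataset = np.array([[0, 0], [1, 1], [2, 2]])
# >>> value_array = np.array([[0, 1]])
# >>> similarity_search(dataset, value_array)
# [[[0, 0], 1.0]]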
| 10
| 1
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def lowercase ( self: Optional[int] ) -> Dict:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
UpperCamelCase_ = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase_ = {"unk_token": "[UNK]"}
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_SCREAMING_SNAKE_CASE ) )
def lowercase ( self: Optional[int] , **_SCREAMING_SNAKE_CASE: Dict ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = "lower newer"
UpperCamelCase_ = "lower newer"
return input_text, output_text
def lowercase ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = "lower newer"
UpperCamelCase_ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
UpperCamelCase_ = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokens + [tokenizer.unk_token]
UpperCamelCase_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def lowercase ( self: Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = tokenizer("Hello" , "World" )
UpperCamelCase_ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , _SCREAMING_SNAKE_CASE )
@slow
def lowercase ( self: int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
UpperCamelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer.encode(
"sequence builders" , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowercase ( self: List[str] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCamelCase_ = tokenizer_class.from_pretrained("microsoft/deberta-base" )
UpperCamelCase_ = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
UpperCamelCase_ = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) for seq in encoding["input_ids"]]
# fmt: off
UpperCamelCase_ = {
"input_ids": [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase_ = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , _SCREAMING_SNAKE_CASE )
for expected, decoded in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 371
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=() , UpperCamelCase_=None , UpperCamelCase_="no" , UpperCamelCase_="29500" ) -> Optional[Any]:
UpperCamelCase_ = False
UpperCamelCase_ = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
UpperCamelCase_ = True
elif "IPython" in sys.modules:
UpperCamelCase_ = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
UpperCamelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , UpperCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
UpperCamelCase_ = 8
UpperCamelCase_ = PrepareForLaunch(UpperCamelCase_ , distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(UpperCamelCase_ , args=UpperCamelCase_ , nprocs=UpperCamelCase_ , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*UpperCamelCase_ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=UpperCamelCase_ , master_addr="127.0.0.1" , master_port=UpperCamelCase_ , mixed_precision=UpperCamelCase_ ):
UpperCamelCase_ = PrepareForLaunch(UpperCamelCase_ , distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(UpperCamelCase_ , args=UpperCamelCase_ , nprocs=UpperCamelCase_ , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCamelCase_ = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*UpperCamelCase_ )
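# A minimal usage sketch (our addition, not part of the original module). The
# entry point must build its own Accelerator; `train_loop` and its arguments
# are hypothetical names:
#
#     def train_loop(mixed_precision="fp16", seed=42):
#         ...  # create Accelerator, model and dataloaders here, never outside
#
#     notebook_launcher(train_loop, args=("fp16", 42), num_processes=2)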
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.0.1", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
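# Usage sketch (our addition): debug_launcher forks `num_processes` CPU workers,
# which is handy for unit-testing distributed code paths without GPUs;
# `my_test_function` is a placeholder name.
#
#     debug_launcher(my_test_function, args=(), num_processes=2)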
| 371
| 1
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE( A ):
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=1_25, additional_special_tokens=None, **kwargs, ) -> None:
        """simple docstring"""
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens")
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids) -> List[int]:
        """simple docstring"""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text) -> List[str]:
        """simple docstring"""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index) -> str:
        """simple docstring"""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """simple docstring"""
        return ()
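# Byte-level round-trip sketch (our addition): with the 3 special tokens above
# (pad=0, eos=1, unk=2) and a 256-entry byte vocabulary, a single character c
# maps to id ord(c) + 3, so "hi" tokenizes to ["h", "i"] -> ids [107, 108], and
# build_inputs_with_special_tokens then appends the eos id 1.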
| 498
|
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _SCREAMING_SNAKE_CASE( A ):
    _optional_components = ["vqvae"]
    def __init__(self, vqvae: AutoencoderKL, unet: UNetaDConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler], ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        """simple docstring"""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 10_00
@torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True, ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """simple docstring"""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device, )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator, )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator, )["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images))
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """simple docstring"""
        # Reverse the DDIM process to recover the noise that generates `images`.
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """simple docstring"""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
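# Sanity-check sketch for slerp (our addition): spherical interpolation between
# two flattened tensors; alpha=0 returns x0 and alpha=1 returns x1 (up to
# floating-point error).
#
#     x0, x1 = torch.randn(4), torch.randn(4)
#     mid = _SCREAMING_SNAKE_CASE.slerp(x0, x1, 0.5)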
| 498
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
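# Illustrative helper (our addition, not used by the conversion): how a fused
# timm qkv projection of shape (3 * hidden_size, hidden_size) decomposes into
# the query/key/value blocks that the loop above writes into the state dict.
def _demo_qkv_split(in_proj_weight, hidden_size):
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    return query, key, value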
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
__UpperCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
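# Example invocation (our addition; the script filename is a placeholder):
#
#     python convert_vit_dino_checkpoint.py --model_name dino_vitb16 \
#         --pytorch_dump_folder_path ./dino_vitb16
#
# The converted weights can then be reloaded with
# ViTModel.from_pretrained("./dino_vitb16").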
| 218
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
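# Worked example (our addition): two_pointer assumes `nums` is sorted in
# ascending order. On [2, 7, 11, 15] with target 9, the sum 2 + 15 = 17 is too
# big, so j moves left past 11 and 7 until 2 + 7 == 9 at indices [0, 1].
assert two_pointer([2, 7, 11, 15], 9) == [0, 1]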
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 218
| 1
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
A : Optional[int] = logging.get_logger(__name__)
def get_resize_output_image_size(input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int) -> Tuple[int, int]:
    """simple docstring"""
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
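# Worked example (our addition): for a 480x640 input and a 384x384 target with
# keep_aspect_ratio=True, the height scale 0.8 deviates less from 1 than the
# width scale 0.6, so both sides use 0.8, giving (384, 512); with multiple=32
# both values are already multiples of 32 and are kept as-is.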
class A ( UpperCAmelCase__ ):
'''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """simple docstring"""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
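# Shape sketch (our addition): the preprocess pipeline is resize -> rescale ->
# normalize -> channels-first, so a 480x640 uint8 RGB image with the default
# 384x384 size becomes a float "pixel_values" array of shape (3, 384, 512)
# per image (width padded up by the aspect-ratio logic above), batched by
# BatchFeature.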
| 15
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
A : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
A : List[str] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
A : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False, ):
        """simple docstring"""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
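# Direct sacrebleu sketch (our addition), mirroring what _compute does above;
# the positional arguments are char_order, word_order, beta, lowercase,
# whitespace, eps_smoothing:
#
#     from sacrebleu import CHRF
#     chrf = CHRF(6, 2, 2, False, False, False)
#     score = chrf.corpus_score(["the cat sat"], [["the cat sat"]])
#
# Note the transposition: sacrebleu expects one list per reference slot, not
# one list per prediction.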
| 15
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
A__ = "stabilityai/stable-diffusion-2-inpainting"
A__ = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
A__ = "Face of a yellow cat, high resolution, sitting on a park bench"
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="np" , )
A__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def __A ( self ):
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
A__ = "stabilityai/stable-diffusion-2-inpainting"
A__ = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
A__ = "Face of a yellow cat, high resolution, sitting on a park bench"
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="np" , )
A__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __A ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
A__ = "stabilityai/stable-diffusion-2-inpainting"
A__ = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="scheduler" )
A__ = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A__ = "Face of a yellow cat, high resolution, sitting on a park bench"
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="np" , )
A__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 714
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"), )
    return train_dataloader, eval_dataloader
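# Shape sketch (our addition): each batch yielded by these loaders is a dict of
# padded tensors, e.g. {"input_ids": (B, L), "attention_mask": (B, L),
# "labels": (B,)}, where L is 128 on TPU and the longest sequence in the batch
# otherwise.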
def training_function(config, args):
    """simple docstring"""
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
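    # Direct invocation sketch (our addition; argparse normally supplies `args`,
    # so the SimpleNamespace below is purely illustrative):
    #
    #     from types import SimpleNamespace
    #     training_function({"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16},
    #                       SimpleNamespace(cpu=True, mixed_precision="no"))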
| 232
| 0
|
'''simple docstring'''
def net_present_value(discount_rate: float, cash_flows: list):
    '''simple docstring'''
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''')
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
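# Worked example (our addition): at a 10% discount rate, cash flows of -100
# now and +60 in each of the next two years give
# -100 + 60/1.1 + 60/1.21 = 4.13 after rounding to two decimal places.
assert net_present_value(0.10, [-100, 60, 60]) == 4.13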
| 296
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCAmelCase__ = parser.parse_args()
if args.model_type == "roberta":
lowerCAmelCase__ = RobertaForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase__ = 'roberta'
elif args.model_type == "gpt2":
lowerCAmelCase__ = GPTaLMHeadModel.from_pretrained(args.model_name)
lowerCAmelCase__ = 'transformer'
lowerCAmelCase__ = model.state_dict()
lowerCAmelCase__ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowerCAmelCase__ = state_dict[F'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowerCAmelCase__ = F'{prefix}.embeddings.{w}.weight'
lowerCAmelCase__ = state_dict[param_name]
for w in ["weight", "bias"]:
lowerCAmelCase__ = F'{prefix}.embeddings.LayerNorm.{w}'
lowerCAmelCase__ = state_dict[param_name]
# Transformer Blocks #
lowerCAmelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[
F'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
lowerCAmelCase__ = state_dict[F'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowerCAmelCase__ = state_dict[F'{layer}']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[F'lm_head.dense.{w}']
lowerCAmelCase__ = state_dict[F'lm_head.layer_norm.{w}']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[F'{prefix}.ln_f.{w}']
lowerCAmelCase__ = state_dict['lm_head.weight']
print(F'N layers selected for distillation: {std_idx}')
print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
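    # The dump is a plain state dict keyed for a 6-layer student (our note); it
    # can seed a freshly configured student model before distillation, e.g.:
    #
    #     student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)
    #
    # `student` is a placeholder for a model whose layer count matches std_idx.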
| 621
| 0
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
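
# A hedged usage sketch (added for clarity; the model id and prompt are illustrative,
# and `image` is assumed to be a PIL image loaded elsewhere):
#
#   from transformers import InstructBlipProcessor
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is shown in this picture?", return_tensors="pt")
#   # `inputs` now carries pixel_values plus ids from both tokenizers:
#   # input_ids / attention_mask and qformer_input_ids / qformer_attention_mask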
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
'''simple docstring'''
def longest_common_subsequence(x: str, y: str) -> tuple[int, str]:
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0

            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
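
# Worked sketch of the recurrence (added for clarity, not in the original):
# l[i][j] holds the LCS length of x[:i] and y[:j], so
#   l[i][j] = l[i-1][j-1] + 1           when x[i-1] == y[j-1]
#   l[i][j] = max(l[i-1][j], l[i][j-1]) otherwise
# e.g. longest_common_subsequence("AGGTAB", "GXTXAYB") == (4, "GTAB")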
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
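
# Hedged usage sketch (added for clarity; values below override the defaults above,
# and the model class is assumed to come from transformers):
#
#   from transformers import PegasusConfig, PegasusForConditionalGeneration
#   config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)
#   model = PegasusForConditionalGeneration(config)  # randomly initialized, student-sized
#   assert config.hidden_size == config.d_model  # via the attribute_map / property above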
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
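
# Note on interpolate_pos_encoding (hedged summary of what the tester above checks):
# passing a 15x15 crop of the 30x30 configured input still works because ViT's position
# embeddings are interpolated to the new grid, giving (15 // 2) ** 2 + 1 = 50 positions.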
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
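
# Sketch of how the lazy module behaves at import time (illustrative, not part of the file):
#
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormModel
#
# Only at this attribute access does _LazyModule actually import
# modeling_roberta_prelayernorm; merely importing the package stays cheap because the
# submodule contents above are listed by name, not executed.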
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
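
# Hedged example of what calculate_prob prints (exact values depend on the input text):
#
#   calculate_prob("the quick brown fox jumps over the lazy dog")
#   # first line:  unigram entropy  H1 = -sum(p * log2(p)) over single characters
#   # second line: bigram entropy   H2 over two-character strings
#   # third line:  H2 - H1, an estimate of the conditional entropy of the next character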
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy")

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type="np")

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
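
# Hedged illustration of the offset semantics tested above (assuming "hello" is one token):
# for text = " hello hello", offset_mapping[1] == (7, 12) with trim_offsets=True (the
# leading space of the second token is trimmed) and (6, 12) with trim_offsets=False.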
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
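
# Worked sketch (added for clarity): with the default scale_factor=8, a 512x512 request
# maps to latent-friendly dimensions: 512 // 64 = 8, then 8 * 8 = 64, so
# downscale_height_and_width(512, 512) == (64, 64); 680x680 rounds up to (88, 88).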
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
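
# Hedged example (illustrative values): prepare_image maps a PIL image to a
# [-1, 1]-normalized CHW float tensor with a leading batch dimension.
#
#   from PIL import Image
#   img = Image.new("RGB", (300, 200))
#   t = prepare_image(img, w=512, h=512)
#   # t.shape == torch.Size([1, 3, 512, 512]); all values lie in [-1, 1]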
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
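# Classifier-free guidance, used above inside the denoising loop, in isolation
# (a minimal sketch, not part of the original pipeline):
#   noise_uncond, noise_text = noise_pred.chunk(2)  # batch was duplicated for uncond/cond
#   guided = noise_uncond + guidance_scale * (noise_text - noise_uncond)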
| 663
| 0
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
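#
# For reference, the cartesian product described above is plain itertools.product
# (a minimal sketch, not part of this script's code):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#   # -> ["--tf32 0", "--tf32 0 --fp16", ..., "--tf32 1 --bf16"]  (6 entries)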
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline (100%) and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
UpperCamelCase__ = float('nan')
class A :
def __init__(self : str , __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = sys.stdout
UpperCAmelCase__ = open(__UpperCAmelCase , "a" )
def __getattr__(self : str , __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return getattr(self.stdout , __UpperCAmelCase )
def lowercase_ (self : List[Any] , __UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
self.stdout.write(__UpperCAmelCase )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __UpperCAmelCase , 0 , re.M ) )
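# Usage note (added): assigning this Tee over sys.stdout, as main() below does with the
# report file, mirrors every print to the file while stripping tqdm's \r progress lines
# from the file copy.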
def lowerCAmelCase_ ( __A=80, __A=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = []
# deal with critical env vars
UpperCAmelCase__ = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
UpperCAmelCase__ = os.environ.get(__A, __A )
if val is not None:
cmd.append(f"""{key}={val}""" )
# python executable (not always needed if the script is executable)
UpperCAmelCase__ = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(__A )
# now the normal args
cmd += list(map(shlex.quote, sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
UpperCAmelCase__ = []
UpperCAmelCase__ = ""
while len(__A ) > 0:
current_line += f"""{cmd.pop(0 )} """
if len(__A ) == 0 or len(__A ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(__A )
UpperCAmelCase__ = ""
return "\\\n".join(__A )
def lowerCAmelCase_ ( __A, __A ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = re.sub(r"[\\\n]+", " ", args.base_cmd )
# remove --output_dir if any and set our own
UpperCAmelCase__ = re.sub("--output_dir\s+[^\s]+", "", args.base_cmd )
args.base_cmd += f""" --output_dir {output_dir}"""
# ensure we have --overwrite_output_dir
UpperCAmelCase__ = re.sub("--overwrite_output_dir\s+", "", args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A, __A ) -> List[Any]:
'''simple docstring'''
if 0: # debug stub: skip the subprocess and return fake metrics
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0, 100 ) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )}, )
UpperCAmelCase__ = subprocess.run(__A, capture_output=__A, text=__A )
if verbose:
print("STDOUT", result.stdout )
print("STDERR", result.stderr )
# save the streams
UpperCAmelCase__ = variation.replace(" ", "-" )
with open(Path(__A ) / f"""log.{prefix}.stdout.txt""", "w" ) as f:
f.write(result.stdout )
with open(Path(__A ) / f"""log.{prefix}.stderr.txt""", "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(f"""{output_dir}/all_results.json""", "r", encoding="utf-8" ) as f:
UpperCAmelCase__ = json.load(__A )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, ) -> int:
'''simple docstring'''
UpperCAmelCase__ = []
UpperCAmelCase__ = []
UpperCAmelCase__ = f"""{id}: {variation:<{longest_variation_len}}"""
UpperCAmelCase__ = f"""{preamble}: """
UpperCAmelCase__ = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(__A ), desc=__A, leave=__A ):
UpperCAmelCase__ = process_run_single(
__A, __A, __A, __A, __A, __A, __A )
UpperCAmelCase__ = single_run_metrics[target_metric_key]
if not math.isnan(__A ):
metrics.append(__A )
results.append(__A )
outcome += "✓"
else:
outcome += "✘"
UpperCAmelCase__ = f"""\33[2K\r{outcome}"""
if len(__A ) > 0:
UpperCAmelCase__ = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
UpperCAmelCase__ = round(mean_metrics[target_metric_key], 2 )
UpperCAmelCase__ = f"""{outcome} {mean_target}"""
if len(__A ) > 1:
results_str += f""" {tuple(round(__A, 2 ) for x in results )}"""
print(__A )
UpperCAmelCase__ = variation
return mean_metrics
else:
print(__A )
return {variation_key: variation, target_metric_key: nan}
def lowerCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"""
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = pd.DataFrame(__A )
UpperCAmelCase__ = "variation"
UpperCAmelCase__ = "diff_%"
UpperCAmelCase__ = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
UpperCAmelCase__ = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(__A ):
# as a fallback, use the minimal value as the sentinel
UpperCAmelCase__ = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(__A ):
UpperCAmelCase__ = df.apply(
lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0, axis="columns", )
# re-order columns
UpperCAmelCase__ = [variation_key, target_metric_key, diff_key, *report_metric_keys]
UpperCAmelCase__ = df.reindex(__A, axis="columns" ) # reorder cols
# capitalize
UpperCAmelCase__ = df.rename(str.capitalize, axis="columns" )
# make the cols as narrow as possible
UpperCAmelCase__ = df.rename(lambda c : c.replace("_", "<br>" ), axis="columns" )
UpperCAmelCase__ = df.rename(lambda c : c.replace("_", "\n" ), axis="columns" )
UpperCAmelCase__ = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=__A, floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=__A, floatfmt=".2f" )]
print("\n\n".join(__A ) )
def lowerCAmelCase_ ( ) -> int:
'''simple docstring'''
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd", default=__A, type=__A, required=__A, help="Base cmd", )
parser.add_argument(
"--variations", default=__A, type=__A, nargs="+", required=__A, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'", )
parser.add_argument(
"--base-variation", default=__A, type=__A, help="Baseline variation to compare to. if None the minimal target value will be used to compare against", )
parser.add_argument(
"--target-metric-key", default=__A, type=__A, required=__A, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second", )
parser.add_argument(
"--report-metric-keys", default="", type=__A, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples", )
parser.add_argument(
"--repeat-times", default=1, type=__A, help="How many times to re-run each variation - an average will be reported", )
parser.add_argument(
"--output_dir", default="output_benchmark", type=__A, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked", )
parser.add_argument(
"--verbose", default=__A, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress", )
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = args.output_dir
Path(__A ).mkdir(exist_ok=__A )
UpperCAmelCase__ = get_base_command(__A, __A )
# split each dimension into its --foo variations
UpperCAmelCase__ = [list(map(str.strip, re.split(r"\|", x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
UpperCAmelCase__ = list(map(str.strip, map(" ".join, itertools.product(*__A ) ) ) )
UpperCAmelCase__ = max(len(x ) for x in variations )
# split wanted keys
UpperCAmelCase__ = args.report_metric_keys.split()
# capture prints into a log file for convenience
UpperCAmelCase__ = f"""benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt"""
print(f"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""" )
print(f"""and this script's output is also piped into {report_fn}""" )
UpperCAmelCase__ = Tee(__A )
print(f"""\n*** Running {len(__A )} benchmarks:""" )
print(f"""Base command: {" ".join(__A )}""" )
UpperCAmelCase__ = "variation"
UpperCAmelCase__ = []
for id, variation in enumerate(tqdm(__A, desc="Total completion: ", leave=__A ) ):
UpperCAmelCase__ = base_cmd + variation.split()
results.append(
process_run(
id + 1, __A, __A, __A, __A, args.target_metric_key, __A, args.repeat_times, __A, args.verbose, ) )
process_results(__A, args.target_metric_key, __A, args.base_variation, __A )
if __name__ == "__main__":
main()
| 486
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Optional[Any] = 'fnet'
def __init__(self : List[str] , __UpperCAmelCase : Optional[Any]=3_2_0_0_0 , __UpperCAmelCase : List[Any]=7_6_8 , __UpperCAmelCase : Optional[Any]=1_2 , __UpperCAmelCase : Optional[int]=3_0_7_2 , __UpperCAmelCase : Tuple="gelu_new" , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : str=5_1_2 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Optional[int]=1E-12 , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : int=5_1_2 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Any=1 , __UpperCAmelCase : Union[str, Any]=2 , **__UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = use_tpu_fourier_optimizations
UpperCAmelCase__ = tpu_short_seq_length
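# Added usage sketch (the class above is the masked form of FNetConfig; the values
# shown are the defaults from the signature):
#   config = FNetConfig(vocab_size=32000, hidden_size=768, num_hidden_layers=12)
#   config = FNetConfig.from_pretrained("google/fnet-base")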
| 486
| 1
|
from ...configuration_utils import PretrainedConfig
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Dict = 'bert-generation'
def __init__( self , lowerCAmelCase=5_0358 , lowerCAmelCase=1024 , lowerCAmelCase=24 , lowerCAmelCase=16 , lowerCAmelCase=4096 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase="absolute" , lowerCAmelCase=True , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
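# Added usage sketch (the class above is the masked form of BertGenerationConfig,
# typically paired with EncoderDecoderModel; kwargs mirror the defaults above):
#   config = BertGenerationConfig(vocab_size=50358, hidden_size=1024, num_hidden_layers=24)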
| 23
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
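# Example invocation (script name and paths are illustrative, not from this file):
#   python convert_mobilebert_checkpoint.py \
#     --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./mobilebert/pytorch_model.bin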
| 23
| 1
|
"""simple docstring"""
from math import sqrt
def lowercase ( lowerCAmelCase__ : int ) -> Any:
assert isinstance(snake_case__ , snake_case__ ) and (
number >= 0
), "'number' must been an int and positive"
__a = True
# 0 and 1 are none primes.
if number <= 1:
__a = False
for divisor in range(2 , int(round(sqrt(snake_case__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__a = False
break
# precondition
assert isinstance(snake_case__ , snake_case__ ), "'status' must been from type bool"
return status
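# Added usage note (the masked name `lowercase` above corresponds to is_prime):
#   >>> is_prime(13)
#   True
#   >>> is_prime(14)
#   False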
def lowercase ( lowerCAmelCase__ : List[str] ) -> List[str]:
assert isinstance(snake_case__ , snake_case__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__a = list(range(2 , n + 1 ) )
__a = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(snake_case__ ) ):
for j in range(i + 1 , len(snake_case__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__a = 0
# filters actual prime numbers.
__a = [x for x in begin_list if x != 0]
# precondition
assert isinstance(snake_case__ , snake_case__ ), "'ans' must been from type list"
return ans
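# Added usage note: this is the sieve of Eratosthenes; e.g. for n = 10 it yields
# [2, 3, 5, 7].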
def lowercase ( lowerCAmelCase__ : int ) -> Any:
assert isinstance(snake_case__ , snake_case__ ) and (n > 2), "'N' must been an int and > 2"
__a = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(snake_case__ ):
ans.append(snake_case__ )
# precondition
assert isinstance(snake_case__ , snake_case__ ), "'ans' must been from type list"
return ans
def lowercase ( lowerCAmelCase__ : str ) -> Any:
assert isinstance(snake_case__ , snake_case__ ) and number >= 0, "'number' must been an int and >= 0"
__a = [] # this list will be returned by the function.
# potential prime number factors.
__a = 2
__a = number
if number == 0 or number == 1:
ans.append(snake_case__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(snake_case__ ):
while quotient != 1:
if is_prime(snake_case__ ) and (quotient % factor == 0):
ans.append(snake_case__ )
quotient /= factor
else:
factor += 1
else:
ans.append(snake_case__ )
# precondition
assert isinstance(snake_case__ , snake_case__ ), "'ans' must been from type list"
return ans
def lowercase ( lowerCAmelCase__ : str ) -> str:
assert isinstance(snake_case__ , snake_case__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__a = 0
# prime factorization of 'number'
__a = prime_factorization(snake_case__ )
__a = max(snake_case__ )
# precondition
assert isinstance(snake_case__ , snake_case__ ), "'ans' must been from type int"
return ans
def lowercase ( lowerCAmelCase__ : Tuple ) -> List[str]:
assert isinstance(snake_case__ , snake_case__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__a = 0
# prime factorization of 'number'
__a = prime_factorization(snake_case__ )
__a = min(snake_case__ )
# precondition
assert isinstance(snake_case__ , snake_case__ ), "'ans' must been from type int"
return ans
def lowercase ( lowerCAmelCase__ : Any ) -> List[str]:
assert isinstance(snake_case__ , snake_case__ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , snake_case__ ), "compare must been from type bool"
return number % 2 == 0
def lowercase ( lowerCAmelCase__ : List[str] ) -> List[str]:
assert isinstance(snake_case__ , snake_case__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , snake_case__ ), "compare must been from type bool"
return number % 2 != 0
def lowercase ( lowerCAmelCase__ : int ) -> Tuple:
assert (
isinstance(snake_case__ , snake_case__ ) and (number > 2) and is_even(snake_case__ )
), "'number' must been an int, even and > 2"
__a = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
__a = get_prime_numbers(snake_case__ )
__a = len(snake_case__ )
# run variable for while-loops.
__a = 0
__a = None
# exit variable. for break up the loops
__a = True
while i < len_pn and loop:
__a = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__a = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(snake_case__ , snake_case__ )
and (len(snake_case__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
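# Added usage note: this implements the Goldbach pair search; e.g. for an even
# number like 28 it returns the first prime pair found, [5, 23].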
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] ) -> List[Any]:
assert (
isinstance(snake_case__ , snake_case__ )
and isinstance(snake_case__ , snake_case__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__a = 0
while numbera != 0:
__a = numbera % numbera
__a = numbera
__a = rest
# precondition
assert isinstance(snake_case__ , snake_case__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
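# Added note: the loop above is Euclid's algorithm (repeatedly replace the pair with
# (smaller, remainder) until the remainder is 0); e.g. gcd(24, 36) -> 12.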
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
assert (
isinstance(snake_case__ , snake_case__ )
and isinstance(snake_case__ , snake_case__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__a = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__a = prime_factorization(snake_case__ )
__a = prime_factorization(snake_case__ )
elif numbera == 1 or numbera == 1:
__a = []
__a = []
__a = max(snake_case__ , snake_case__ )
__a = 0
__a = 0
__a = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__a = prime_fac_a.count(snake_case__ )
__a = prime_fac_a.count(snake_case__ )
for _ in range(max(snake_case__ , snake_case__ ) ):
ans *= n
else:
__a = prime_fac_a.count(snake_case__ )
for _ in range(snake_case__ ):
ans *= n
done.append(snake_case__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__a = prime_fac_a.count(snake_case__ )
for _ in range(snake_case__ ):
ans *= n
done.append(snake_case__ )
# precondition
assert isinstance(snake_case__ , snake_case__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
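# Added usage note: this computes the least common multiple from the two prime
# factorizations; e.g. kgV(8, 10) -> 40, since 8 = 2^3 and 10 = 2 * 5.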
def lowercase ( lowerCAmelCase__ : int ) -> Tuple:
assert isinstance(snake_case__ , snake_case__ ) and (n >= 0), "'number' must been a positive int"
__a = 0
__a = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(snake_case__ ):
ans += 1
# precondition
assert isinstance(snake_case__ , snake_case__ ) and is_prime(
snake_case__ ), "'ans' must been a prime number and from type int"
return ans
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : Any ) -> Dict:
assert (
is_prime(snake_case__ ) and is_prime(snake_case__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__a = p_number_a + 1 # jump to the next number
__a = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(snake_case__ ):
number += 1
while number < p_number_a:
ans.append(snake_case__ )
number += 1
# fetch the next prime number.
while not is_prime(snake_case__ ):
number += 1
# precondition
assert (
isinstance(snake_case__ , snake_case__ )
and ans[0] != p_number_a
and ans[len(snake_case__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowercase ( lowerCAmelCase__ : int ) -> Any:
assert isinstance(snake_case__ , snake_case__ ) and (n >= 1), "'n' must been int and >= 1"
__a = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(snake_case__ )
# precondition
assert ans[0] == 1 and ans[len(snake_case__ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def lowercase ( lowerCAmelCase__ : Tuple ) -> int:
assert isinstance(snake_case__ , snake_case__ ) and (
number > 1
), "'number' must been an int and >= 1"
__a = get_divisors(snake_case__ )
# precondition
assert (
isinstance(snake_case__ , snake_case__ )
and (divisors[0] == 1)
and (divisors[len(snake_case__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
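# Added usage note: a perfect number equals the sum of its proper divisors,
# e.g. 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 both return True here.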
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] ) -> str:
assert (
isinstance(snake_case__ , snake_case__ )
and isinstance(snake_case__ , snake_case__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__a = gcd(abs(snake_case__ ) , abs(snake_case__ ) )
# precondition
assert (
isinstance(snake_case__ , snake_case__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowercase ( lowerCAmelCase__ : Any ) -> Union[str, Any]:
assert isinstance(snake_case__ , snake_case__ ) and (n >= 0), "'n' must been a int and >= 0"
__a = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowercase ( lowerCAmelCase__ : List[str] ) -> Tuple:
assert isinstance(snake_case__ , snake_case__ ) and (n >= 0), "'n' must been an int and >= 0"
__a = 0
__a = 1
__a = 1 # this will be returned
for _ in range(n - 1 ):
__a = ans
ans += fiba
__a = tmp
return ans
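# Added usage note: iterative Fibonacci; with this implementation's indexing,
# e.g. fib(5) -> 5 and fib(10) -> 55.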
| 695
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
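# Added note: the _LazyModule indirection defers importing the heavy torch-backed
# modeling code until one of the exported names is first accessed; the TYPE_CHECKING
# branch above gives static type checkers the real imports instead.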
| 91
| 0
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( A , A , A ):
@register_to_config
def __init__( self : Optional[int] , A : int , A : int , A : int , A : float , A : int , A : int , A : int , A : int , A : str , A : bool = False , ) -> List[str]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Embedding(A , A)
_UpperCAmelCase = nn.Embedding(A , A)
_UpperCAmelCase = False
_UpperCAmelCase = nn.Dropout(p=A)
_UpperCAmelCase = TaConfig(
vocab_size=A , d_model=A , num_heads=A , d_kv=A , d_ff=A , dropout_rate=A , feed_forward_proj=A , is_decoder=A , is_encoder_decoder=A , )
_UpperCAmelCase = nn.ModuleList()
for lyr_num in range(A):
_UpperCAmelCase = TaBlock(A)
self.encoders.append(A)
_UpperCAmelCase = TaLayerNorm(A)
_UpperCAmelCase = nn.Dropout(p=A)
def _lowerCamelCase ( self : Dict , A : List[Any] , A : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.token_embedder(A)
_UpperCAmelCase = encoder_input_tokens.shape[1]
_UpperCAmelCase = torch.arange(A , device=encoder_input_tokens.device)
x += self.position_encoding(A)
_UpperCAmelCase = self.dropout_pre(A)
# invert the attention mask
_UpperCAmelCase = encoder_input_tokens.size()
_UpperCAmelCase = self.get_extended_attention_mask(A , A)
for lyr in self.encoders:
_UpperCAmelCase = lyr(A , A)[0]
_UpperCAmelCase = self.layer_norm(A)
return self.dropout_post(A), encoder_inputs_mask
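# Added note: get_extended_attention_mask (from ModuleUtilsMixin) broadcasts the
# (batch, seq) 0/1 mask to (batch, 1, 1, seq) and maps 0 to a large negative value
# so that masked positions are ignored by the softmax inside each TaBlock.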
| 639
|
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [0]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
_UpperCAmelCase = [60]
_UpperCAmelCase = [10]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 3
_UpperCAmelCase = [1, 2, 3]
_UpperCAmelCase = [3, 2, 1]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 5)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 50
_UpperCAmelCase = [60, 1_00, 1_20]
_UpperCAmelCase = [10, 20, 30]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 2_20)
if __name__ == "__main__":
unittest.main()
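# Added reference sketch (an assumption about the `knapsack` module under test;
# the classic 0/1 knapsack recursion the cases above exercise):
#   def knapsack(capacity, weights, values, counter):
#       if counter == 0 or capacity == 0:
#           return 0
#       if weights[counter - 1] > capacity:
#           return knapsack(capacity, weights, values, counter - 1)
#       return max(
#           values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
#           knapsack(capacity, weights, values, counter - 1),
#       )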
| 639
| 1
|